2024-12-17 12:37:33,466 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@50378a4
2024-12-17 12:37:33,476 main DEBUG Took 0.008367 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-17 12:37:33,477 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-17 12:37:33,477 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-17 12:37:33,478 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-17 12:37:33,478 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,484 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-17 12:37:33,495 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,496 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,497 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,497 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,498 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,498 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,499 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,499 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,499 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,500 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,500 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,500 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,501 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,501 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,501 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,502 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,502 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,502 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,503 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,503 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,503 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,503 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,504 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,504 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-17 12:37:33,504 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,504 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-17 12:37:33,506 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-17 12:37:33,507 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-17 12:37:33,508 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-17 12:37:33,509 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-17 12:37:33,510 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-17 12:37:33,510 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-17 12:37:33,517 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-17 12:37:33,519 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-17 12:37:33,521 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-17 12:37:33,521 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-17 12:37:33,522 main DEBUG createAppenders(={Console})
2024-12-17 12:37:33,522 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@50378a4 initialized
2024-12-17 12:37:33,523 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@50378a4
2024-12-17 12:37:33,523 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@50378a4 OK.
2024-12-17 12:37:33,523 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-17 12:37:33,524 main DEBUG OutputStream closed
2024-12-17 12:37:33,524 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-17 12:37:33,524 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-17 12:37:33,524 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@66f57048 OK
2024-12-17 12:37:33,605 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-17 12:37:33,607 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-17 12:37:33,608 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-17 12:37:33,609 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-17 12:37:33,610 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-17 12:37:33,610 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-17 12:37:33,611 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-17 12:37:33,611 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-17 12:37:33,612 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-17 12:37:33,612 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-17 12:37:33,612 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-17 12:37:33,613 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-17 12:37:33,613 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-17 12:37:33,614 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-17 12:37:33,614 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-17 12:37:33,614 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-17 12:37:33,615 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-17 12:37:33,616 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-17 12:37:33,618 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-17 12:37:33,619 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@2766ca9d) with optional ClassLoader: null
2024-12-17 12:37:33,619 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-17 12:37:33,620 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@2766ca9d] started OK.
2024-12-17T12:37:33,854 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2
2024-12-17 12:37:33,857 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-17 12:37:33,858 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-17T12:37:33,866 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-17T12:37:33,889 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-17T12:37:33,893 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/cluster_e84f10d9-83a3-7112-3c7b-cf7e72d3a51d, deleteOnExit=true 2024-12-17T12:37:33,894 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-17T12:37:33,895 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/test.cache.data in system properties and HBase conf 2024-12-17T12:37:33,895 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/hadoop.tmp.dir in system properties and HBase conf 2024-12-17T12:37:33,896 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/hadoop.log.dir in system properties and HBase conf 2024-12-17T12:37:33,897 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-17T12:37:33,898 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-17T12:37:33,898 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-17T12:37:33,994 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-17T12:37:34,084 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-17T12:37:34,087 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-17T12:37:34,088 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-17T12:37:34,089 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-17T12:37:34,089 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-17T12:37:34,090 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-17T12:37:34,090 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-17T12:37:34,091 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-17T12:37:34,091 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-17T12:37:34,092 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-17T12:37:34,092 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/nfs.dump.dir in system properties and HBase conf 2024-12-17T12:37:34,093 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/java.io.tmpdir in system properties and HBase conf 2024-12-17T12:37:34,093 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-17T12:37:34,094 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-17T12:37:34,094 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-17T12:37:35,018 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-17T12:37:35,084 INFO [Time-limited test {}] log.Log(170): Logging initialized @2213ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-17T12:37:35,154 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-17T12:37:35,211 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-17T12:37:35,229 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-17T12:37:35,229 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-17T12:37:35,230 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-17T12:37:35,242 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-17T12:37:35,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@625ac51e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/hadoop.log.dir/,AVAILABLE} 2024-12-17T12:37:35,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-17T12:37:35,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52042c53{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/java.io.tmpdir/jetty-localhost-37585-hadoop-hdfs-3_4_1-tests_jar-_-any-4561247164249860418/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-17T12:37:35,406 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45e9671d{HTTP/1.1, (http/1.1)}{localhost:37585} 2024-12-17T12:37:35,406 INFO [Time-limited test {}] server.Server(415): Started @2536ms 2024-12-17T12:37:35,884 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-17T12:37:35,890 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-17T12:37:35,891 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-17T12:37:35,891 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-17T12:37:35,891 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-17T12:37:35,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f3c9073{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/hadoop.log.dir/,AVAILABLE} 2024-12-17T12:37:35,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@637efe93{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-17T12:37:35,985 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e967c25{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/java.io.tmpdir/jetty-localhost-33297-hadoop-hdfs-3_4_1-tests_jar-_-any-11498861129868686586/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-17T12:37:35,985 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e95663c{HTTP/1.1, (http/1.1)}{localhost:33297} 2024-12-17T12:37:35,985 INFO [Time-limited test {}] server.Server(415): Started @3115ms 2024-12-17T12:37:36,031 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-17T12:37:36,754 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/cluster_e84f10d9-83a3-7112-3c7b-cf7e72d3a51d/dfs/data/data2/current/BP-1712470660-172.17.0.2-1734439054599/current, will proceed with Du for space computation calculation, 2024-12-17T12:37:36,754 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/cluster_e84f10d9-83a3-7112-3c7b-cf7e72d3a51d/dfs/data/data1/current/BP-1712470660-172.17.0.2-1734439054599/current, will proceed with Du for space computation calculation, 2024-12-17T12:37:36,778 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-17T12:37:36,820 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41e3b3e760a08203 with lease ID 0xbaa03922adf6568: Processing first storage report for DS-726b5a5c-ba6b-4ab0-92e7-7f8811d90233 from datanode DatanodeRegistration(127.0.0.1:35695, datanodeUuid=8a8aa23a-bee5-4781-95e2-e35bdb010347, infoPort=38587, infoSecurePort=0, ipcPort=38613, storageInfo=lv=-57;cid=testClusterID;nsid=1693513447;c=1734439054599) 2024-12-17T12:37:36,821 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41e3b3e760a08203 with lease ID 0xbaa03922adf6568: from storage DS-726b5a5c-ba6b-4ab0-92e7-7f8811d90233 node DatanodeRegistration(127.0.0.1:35695, datanodeUuid=8a8aa23a-bee5-4781-95e2-e35bdb010347, infoPort=38587, infoSecurePort=0, ipcPort=38613, storageInfo=lv=-57;cid=testClusterID;nsid=1693513447;c=1734439054599), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-17T12:37:36,821 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41e3b3e760a08203 with lease ID 0xbaa03922adf6568: Processing first storage report for DS-fc3a6f26-a267-4c2f-872f-6f2b0d2f3e96 from datanode DatanodeRegistration(127.0.0.1:35695, datanodeUuid=8a8aa23a-bee5-4781-95e2-e35bdb010347, infoPort=38587, infoSecurePort=0, ipcPort=38613, storageInfo=lv=-57;cid=testClusterID;nsid=1693513447;c=1734439054599) 2024-12-17T12:37:36,822 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41e3b3e760a08203 with lease ID 0xbaa03922adf6568: from storage DS-fc3a6f26-a267-4c2f-872f-6f2b0d2f3e96 node DatanodeRegistration(127.0.0.1:35695, datanodeUuid=8a8aa23a-bee5-4781-95e2-e35bdb010347, infoPort=38587, infoSecurePort=0, ipcPort=38613, storageInfo=lv=-57;cid=testClusterID;nsid=1693513447;c=1734439054599), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-17T12:37:36,888 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2 2024-12-17T12:37:36,953 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/cluster_e84f10d9-83a3-7112-3c7b-cf7e72d3a51d/zookeeper_0, clientPort=59557, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/cluster_e84f10d9-83a3-7112-3c7b-cf7e72d3a51d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/cluster_e84f10d9-83a3-7112-3c7b-cf7e72d3a51d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-17T12:37:36,964 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=59557 2024-12-17T12:37:36,976 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-17T12:37:36,979 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-17T12:37:37,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741825_1001 (size=7) 2024-12-17T12:37:37,575 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9 with version=8 2024-12-17T12:37:37,576 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/hbase-staging 2024-12-17T12:37:37,675 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-17T12:37:37,904 INFO [Time-limited test {}] client.ConnectionUtils(129): master/681c08bfdbdf:0 server-side Connection retries=45 2024-12-17T12:37:37,918 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-17T12:37:37,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-17T12:37:37,919 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-17T12:37:37,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-17T12:37:37,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=1 2024-12-17T12:37:38,024 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-17T12:37:38,071 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-17T12:37:38,078 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-17T12:37:38,081 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-17T12:37:38,100 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 43419 (auto-detected) 2024-12-17T12:37:38,101 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-17T12:37:38,115 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38693 2024-12-17T12:37:38,122 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-17T12:37:38,124 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-17T12:37:38,134 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:38693 connecting to ZooKeeper ensemble=127.0.0.1:59557 2024-12-17T12:37:38,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:386930x0, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-17T12:37:38,230 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38693-0x10033fed2a90000 connected 2024-12-17T12:37:38,297 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-17T12:37:38,302 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-17T12:37:38,306 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-17T12:37:38,310 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38693 2024-12-17T12:37:38,311 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38693 2024-12-17T12:37:38,311 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38693 2024-12-17T12:37:38,312 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38693 2024-12-17T12:37:38,313 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38693 2024-12-17T12:37:38,320 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9, hbase.cluster.distributed=false 2024-12-17T12:37:38,373 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/681c08bfdbdf:0 server-side Connection retries=45 2024-12-17T12:37:38,373 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-17T12:37:38,373 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-17T12:37:38,373 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-17T12:37:38,374 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-17T12:37:38,374 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-17T12:37:38,376 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-17T12:37:38,378 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-17T12:37:38,378 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36491 2024-12-17T12:37:38,380 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-17T12:37:38,384 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-17T12:37:38,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-17T12:37:38,388 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-17T12:37:38,391 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36491 connecting to ZooKeeper ensemble=127.0.0.1:59557 2024-12-17T12:37:38,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364910x0, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-17T12:37:38,601 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36491-0x10033fed2a90001 connected 2024-12-17T12:37:38,602 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-17T12:37:38,606 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-17T12:37:38,608 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-17T12:37:38,609 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36491 2024-12-17T12:37:38,609 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36491 2024-12-17T12:37:38,609 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36491 2024-12-17T12:37:38,610 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36491 2024-12-17T12:37:38,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36491 2024-12-17T12:37:38,613 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/681c08bfdbdf,38693,1734439057670 2024-12-17T12:37:38,625 DEBUG [M:0;681c08bfdbdf:38693 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;681c08bfdbdf:38693 2024-12-17T12:37:38,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-17T12:37:38,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-17T12:37:38,629 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/681c08bfdbdf,38693,1734439057670 2024-12-17T12:37:38,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-17T12:37:38,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-17T12:37:38,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:38,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:38,653 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on 
existing znode=/hbase/master 2024-12-17T12:37:38,654 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/681c08bfdbdf,38693,1734439057670 from backup master directory 2024-12-17T12:37:38,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-17T12:37:38,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-17T12:37:38,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/681c08bfdbdf,38693,1734439057670 2024-12-17T12:37:38,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-17T12:37:38,661 WARN [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-17T12:37:38,661 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=681c08bfdbdf,38693,1734439057670 2024-12-17T12:37:38,663 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-17T12:37:38,664 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-17T12:37:38,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741826_1002 (size=42) 2024-12-17T12:37:39,126 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/hbase.id with ID: 92474b18-507e-4f2b-98b2-ac01a28879c8 2024-12-17T12:37:39,171 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-17T12:37:39,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:39,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:39,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741827_1003 (size=196) 2024-12-17T12:37:39,638 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-17T12:37:39,640 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-17T12:37:39,653 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:37:39,656 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-17T12:37:39,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741828_1004 (size=1189)
2024-12-17T12:37:40,100 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store
2024-12-17T12:37:40,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741829_1005 (size=34)
2024-12-17T12:37:40,117 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-17T12:37:40,117 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:37:40,118 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-17T12:37:40,118 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-17T12:37:40,119 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-17T12:37:40,119 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-17T12:37:40,119 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-17T12:37:40,119 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-17T12:37:40,119 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-17T12:37:40,121 WARN [master/681c08bfdbdf:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/.initializing 2024-12-17T12:37:40,121 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/WALs/681c08bfdbdf,38693,1734439057670 2024-12-17T12:37:40,127 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-17T12:37:40,137 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=681c08bfdbdf%2C38693%2C1734439057670, suffix=, logDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/WALs/681c08bfdbdf,38693,1734439057670, archiveDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/oldWALs, maxLogs=10 2024-12-17T12:37:40,153 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/WALs/681c08bfdbdf,38693,1734439057670/681c08bfdbdf%2C38693%2C1734439057670.1734439060141, exclude list is [], retry=0 2024-12-17T12:37:40,166 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35695,DS-726b5a5c-ba6b-4ab0-92e7-7f8811d90233,DISK] 2024-12-17T12:37:40,169 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-17T12:37:40,198 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/WALs/681c08bfdbdf,38693,1734439057670/681c08bfdbdf%2C38693%2C1734439057670.1734439060141 2024-12-17T12:37:40,199 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38587:38587)] 2024-12-17T12:37:40,200 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:37:40,200 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:37:40,203 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-17T12:37:40,204 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-17T12:37:40,237 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-17T12:37:40,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-17T12:37:40,259 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:40,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-17T12:37:40,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-17T12:37:40,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-17T12:37:40,265 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:40,266 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:37:40,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-17T12:37:40,269 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-17T12:37:40,269 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:40,270 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:37:40,270 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-17T12:37:40,273 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-17T12:37:40,273 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:40,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:37:40,278 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-17T12:37:40,279 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-17T12:37:40,287 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-17T12:37:40,291 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-17T12:37:40,296 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-17T12:37:40,297 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67385626, jitterRate=0.004124075174331665}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-17T12:37:40,301 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-17T12:37:40,302 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-17T12:37:40,326 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f9e0f1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:40,352 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-12-17T12:37:40,361 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-17T12:37:40,361 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-17T12:37:40,363 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-17T12:37:40,364 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-17T12:37:40,368 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 3 msec 2024-12-17T12:37:40,368 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-17T12:37:40,389 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-17T12:37:40,399 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-17T12:37:40,443 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-17T12:37:40,446 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-17T12:37:40,447 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-17T12:37:40,457 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-17T12:37:40,459 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-17T12:37:40,462 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-17T12:37:40,468 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-17T12:37:40,470 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-17T12:37:40,477 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-17T12:37:40,489 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-17T12:37:40,498 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-17T12:37:40,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-17T12:37:40,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-17T12:37:40,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:40,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:40,512 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=681c08bfdbdf,38693,1734439057670, sessionid=0x10033fed2a90000, setting cluster-up flag (Was=false) 2024-12-17T12:37:40,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:40,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:40,568 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-17T12:37:40,570 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=681c08bfdbdf,38693,1734439057670 2024-12-17T12:37:40,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:40,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:40,619 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-17T12:37:40,622 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=681c08bfdbdf,38693,1734439057670 2024-12-17T12:37:40,696 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-12-17T12:37:40,701 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-17T12:37:40,703 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-17T12:37:40,708 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 681c08bfdbdf,38693,1734439057670 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-17T12:37:40,711 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/681c08bfdbdf:0, corePoolSize=5, maxPoolSize=5 2024-12-17T12:37:40,711 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/681c08bfdbdf:0, corePoolSize=5, maxPoolSize=5 2024-12-17T12:37:40,711 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/681c08bfdbdf:0, corePoolSize=5, maxPoolSize=5 2024-12-17T12:37:40,711 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/681c08bfdbdf:0, corePoolSize=5, maxPoolSize=5 2024-12-17T12:37:40,712 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/681c08bfdbdf:0, corePoolSize=10, maxPoolSize=10 2024-12-17T12:37:40,712 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,712 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/681c08bfdbdf:0, corePoolSize=2, maxPoolSize=2 2024-12-17T12:37:40,712 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,713 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734439090713 2024-12-17T12:37:40,715 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-17T12:37:40,716 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-17T12:37:40,716 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute 
pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-17T12:37:40,717 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-17T12:37:40,719 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-17T12:37:40,719 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-17T12:37:40,719 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-17T12:37:40,719 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-17T12:37:40,720 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:40,720 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:40,720 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-17T12:37:40,721 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-17T12:37:40,722 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-17T12:37:40,722 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-17T12:37:40,725 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-17T12:37:40,725 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-17T12:37:40,727 DEBUG 
[master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/681c08bfdbdf:0:becomeActiveMaster-HFileCleaner.large.0-1734439060726,5,FailOnTimeoutGroup] 2024-12-17T12:37:40,730 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/681c08bfdbdf:0:becomeActiveMaster-HFileCleaner.small.0-1734439060727,5,FailOnTimeoutGroup] 2024-12-17T12:37:40,730 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:40,730 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-17T12:37:40,731 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;681c08bfdbdf:36491 2024-12-17T12:37:40,731 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:40,731 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:40,732 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1008): ClusterId : 92474b18-507e-4f2b-98b2-ac01a28879c8 2024-12-17T12:37:40,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741831_1007 (size=1039) 2024-12-17T12:37:40,735 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-17T12:37:40,745 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-17T12:37:40,745 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-17T12:37:40,753 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-17T12:37:40,754 DEBUG [RS:0;681c08bfdbdf:36491 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f40afcb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:40,756 DEBUG [RS:0;681c08bfdbdf:36491 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527d1d41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=681c08bfdbdf/172.17.0.2:0 2024-12-17T12:37:40,760 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-17T12:37:40,760 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-17T12:37:40,760 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-17T12:37:40,762 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(3073): reportForDuty to master=681c08bfdbdf,38693,1734439057670 with isa=681c08bfdbdf/172.17.0.2:36491, startcode=1734439058372 2024-12-17T12:37:40,776 DEBUG [RS:0;681c08bfdbdf:36491 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-17T12:37:40,808 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53899, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-17T12:37:40,814 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38693 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:40,816 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38693 {}] master.ServerManager(486): Registering regionserver=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:40,829 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9 2024-12-17T12:37:40,829 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38223 2024-12-17T12:37:40,829 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-17T12:37:40,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-17T12:37:40,841 DEBUG [RS:0;681c08bfdbdf:36491 {}] zookeeper.ZKUtil(111): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:40,841 WARN [RS:0;681c08bfdbdf:36491 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-17T12:37:40,841 INFO [RS:0;681c08bfdbdf:36491 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-17T12:37:40,841 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/WALs/681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:40,843 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [681c08bfdbdf,36491,1734439058372] 2024-12-17T12:37:40,855 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-17T12:37:40,865 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-17T12:37:40,880 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-17T12:37:40,882 INFO [RS:0;681c08bfdbdf:36491 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-17T12:37:40,882 INFO [RS:0;681c08bfdbdf:36491 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:40,883 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-17T12:37:40,888 INFO [RS:0;681c08bfdbdf:36491 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-17T12:37:40,888 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,888 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,888 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,889 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,889 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,889 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/681c08bfdbdf:0, corePoolSize=2, maxPoolSize=2 2024-12-17T12:37:40,889 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,889 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,889 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,889 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,889 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/681c08bfdbdf:0, corePoolSize=1, maxPoolSize=1 2024-12-17T12:37:40,890 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/681c08bfdbdf:0, corePoolSize=3, maxPoolSize=3 2024-12-17T12:37:40,890 DEBUG [RS:0;681c08bfdbdf:36491 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0, corePoolSize=3, maxPoolSize=3 2024-12-17T12:37:40,890 INFO [RS:0;681c08bfdbdf:36491 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:40,891 INFO [RS:0;681c08bfdbdf:36491 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:40,891 INFO [RS:0;681c08bfdbdf:36491 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:40,891 INFO [RS:0;681c08bfdbdf:36491 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:40,891 INFO [RS:0;681c08bfdbdf:36491 {}] hbase.ChoreService(168): Chore ScheduledChore name=681c08bfdbdf,36491,1734439058372-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-17T12:37:40,907 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-17T12:37:40,909 INFO [RS:0;681c08bfdbdf:36491 {}] hbase.ChoreService(168): Chore ScheduledChore name=681c08bfdbdf,36491,1734439058372-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:40,929 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.Replication(204): 681c08bfdbdf,36491,1734439058372 started 2024-12-17T12:37:40,929 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1767): Serving as 681c08bfdbdf,36491,1734439058372, RpcServer on 681c08bfdbdf/172.17.0.2:36491, sessionid=0x10033fed2a90001 2024-12-17T12:37:40,930 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-17T12:37:40,930 DEBUG [RS:0;681c08bfdbdf:36491 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:40,930 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '681c08bfdbdf,36491,1734439058372' 2024-12-17T12:37:40,930 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-17T12:37:40,931 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-17T12:37:40,932 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-17T12:37:40,932 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-17T12:37:40,932 DEBUG [RS:0;681c08bfdbdf:36491 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:40,932 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '681c08bfdbdf,36491,1734439058372' 2024-12-17T12:37:40,932 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-17T12:37:40,932 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-17T12:37:40,933 DEBUG [RS:0;681c08bfdbdf:36491 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-17T12:37:40,933 INFO [RS:0;681c08bfdbdf:36491 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-17T12:37:40,933 INFO [RS:0;681c08bfdbdf:36491 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-17T12:37:41,047 INFO [RS:0;681c08bfdbdf:36491 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-17T12:37:41,050 INFO [RS:0;681c08bfdbdf:36491 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=681c08bfdbdf%2C36491%2C1734439058372, suffix=, logDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/WALs/681c08bfdbdf,36491,1734439058372, archiveDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/oldWALs, maxLogs=32 2024-12-17T12:37:41,063 DEBUG [RS:0;681c08bfdbdf:36491 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/WALs/681c08bfdbdf,36491,1734439058372/681c08bfdbdf%2C36491%2C1734439058372.1734439061052, exclude list is [], retry=0 2024-12-17T12:37:41,067 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35695,DS-726b5a5c-ba6b-4ab0-92e7-7f8811d90233,DISK] 2024-12-17T12:37:41,070 INFO [RS:0;681c08bfdbdf:36491 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/WALs/681c08bfdbdf,36491,1734439058372/681c08bfdbdf%2C36491%2C1734439058372.1734439061052 2024-12-17T12:37:41,071 DEBUG [RS:0;681c08bfdbdf:36491 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38587:38587)] 2024-12-17T12:37:41,136 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-17T12:37:41,137 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9 2024-12-17T12:37:41,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741833_1009 (size=32) 2024-12-17T12:37:41,555 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:37:41,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-17T12:37:41,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-17T12:37:41,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:41,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-17T12:37:41,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-17T12:37:41,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-17T12:37:41,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:41,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-17T12:37:41,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-12-17T12:37:41,569 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-17T12:37:41,569 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:41,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-17T12:37:41,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740 2024-12-17T12:37:41,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740 2024-12-17T12:37:41,577 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-17T12:37:41,581 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-17T12:37:41,585 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-17T12:37:41,586 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75154673, jitterRate=0.11989189684391022}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-17T12:37:41,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-17T12:37:41,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-17T12:37:41,588 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-17T12:37:41,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-17T12:37:41,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-17T12:37:41,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-17T12:37:41,589 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-17T12:37:41,589 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-17T12:37:41,592 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-17T12:37:41,592 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-17T12:37:41,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-17T12:37:41,604 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-17T12:37:41,606 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-17T12:37:41,759 DEBUG [681c08bfdbdf:38693 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-17T12:37:41,767 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:41,775 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 681c08bfdbdf,36491,1734439058372, state=OPENING 2024-12-17T12:37:41,818 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-17T12:37:41,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:41,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:41,829 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-17T12:37:41,829 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-17T12:37:41,832 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:37:42,012 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:42,014 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-17T12:37:42,017 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35832, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-17T12:37:42,027 INFO [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-17T12:37:42,027 INFO [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-17T12:37:42,028 INFO [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-17T12:37:42,031 INFO [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=681c08bfdbdf%2C36491%2C1734439058372.meta, suffix=.meta, logDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/WALs/681c08bfdbdf,36491,1734439058372, archiveDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/oldWALs, maxLogs=32 2024-12-17T12:37:42,043 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/WALs/681c08bfdbdf,36491,1734439058372/681c08bfdbdf%2C36491%2C1734439058372.meta.1734439062032.meta, exclude list is [], retry=0 2024-12-17T12:37:42,046 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35695,DS-726b5a5c-ba6b-4ab0-92e7-7f8811d90233,DISK] 2024-12-17T12:37:42,049 INFO [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/WALs/681c08bfdbdf,36491,1734439058372/681c08bfdbdf%2C36491%2C1734439058372.meta.1734439062032.meta 2024-12-17T12:37:42,049 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:38587:38587)] 2024-12-17T12:37:42,050 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:37:42,051 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-17T12:37:42,097 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-17T12:37:42,101 INFO [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-17T12:37:42,104 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-17T12:37:42,105 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:37:42,105 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-17T12:37:42,105 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-17T12:37:42,107 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-17T12:37:42,109 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-17T12:37:42,109 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:42,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-17T12:37:42,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-17T12:37:42,111 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-17T12:37:42,112 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:42,112 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-17T12:37:42,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-17T12:37:42,114 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-17T12:37:42,114 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:42,115 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-17T12:37:42,116 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740 2024-12-17T12:37:42,119 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740 2024-12-17T12:37:42,121 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-17T12:37:42,124 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-17T12:37:42,125 INFO [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61581978, jitterRate=-0.0823570191860199}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-17T12:37:42,126 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-17T12:37:42,132 INFO [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734439062006 2024-12-17T12:37:42,141 DEBUG [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-17T12:37:42,142 INFO [RS_OPEN_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-17T12:37:42,143 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:42,144 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 681c08bfdbdf,36491,1734439058372, state=OPEN 2024-12-17T12:37:42,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-17T12:37:42,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-17T12:37:42,225 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-17T12:37:42,225 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-17T12:37:42,232 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-17T12:37:42,232 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=681c08bfdbdf,36491,1734439058372 in 394 msec 2024-12-17T12:37:42,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-17T12:37:42,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN in 636 msec 2024-12-17T12:37:42,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5860 sec 2024-12-17T12:37:42,244 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734439062244, completionTime=-1 2024-12-17T12:37:42,244 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-17T12:37:42,245 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-17T12:37:42,276 DEBUG [hconnection-0x3fe77b6d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:37:42,278 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35848, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:42,287 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-17T12:37:42,287 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734439122287 2024-12-17T12:37:42,287 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734439182287 2024-12-17T12:37:42,287 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 42 msec 2024-12-17T12:37:42,320 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=681c08bfdbdf,38693,1734439057670-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:42,320 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=681c08bfdbdf,38693,1734439057670-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:42,320 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=681c08bfdbdf,38693,1734439057670-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:42,322 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-681c08bfdbdf:38693, period=300000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:42,322 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-17T12:37:42,326 DEBUG [master/681c08bfdbdf:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-17T12:37:42,331 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-17T12:37:42,332 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-17T12:37:42,338 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-17T12:37:42,342 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-17T12:37:42,343 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:42,345 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-17T12:37:42,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741835_1011 (size=358) 2024-12-17T12:37:42,768 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 0747a0e153fecce30e3abad582ed5b21, NAME => 'hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9 2024-12-17T12:37:42,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741836_1012 (size=42) 2024-12-17T12:37:43,180 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:37:43,180 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 0747a0e153fecce30e3abad582ed5b21, disabling compactions & flushes 2024-12-17T12:37:43,181 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 2024-12-17T12:37:43,181 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 2024-12-17T12:37:43,181 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 
after waiting 0 ms 2024-12-17T12:37:43,181 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 2024-12-17T12:37:43,181 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 2024-12-17T12:37:43,182 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 0747a0e153fecce30e3abad582ed5b21: 2024-12-17T12:37:43,185 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-17T12:37:43,193 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734439063186"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734439063186"}]},"ts":"1734439063186"} 2024-12-17T12:37:43,213 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-17T12:37:43,214 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-17T12:37:43,217 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439063215"}]},"ts":"1734439063215"} 2024-12-17T12:37:43,221 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-17T12:37:43,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=0747a0e153fecce30e3abad582ed5b21, ASSIGN}] 2024-12-17T12:37:43,284 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=0747a0e153fecce30e3abad582ed5b21, ASSIGN 2024-12-17T12:37:43,287 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=0747a0e153fecce30e3abad582ed5b21, ASSIGN; state=OFFLINE, location=681c08bfdbdf,36491,1734439058372; forceNewPlan=false, retain=false 2024-12-17T12:37:43,438 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=0747a0e153fecce30e3abad582ed5b21, regionState=OPENING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:43,446 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 0747a0e153fecce30e3abad582ed5b21, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:37:43,603 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:43,612 INFO [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 2024-12-17T12:37:43,613 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 0747a0e153fecce30e3abad582ed5b21, NAME => 'hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21.', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:37:43,614 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 0747a0e153fecce30e3abad582ed5b21 2024-12-17T12:37:43,614 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:37:43,614 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 0747a0e153fecce30e3abad582ed5b21 2024-12-17T12:37:43,614 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 0747a0e153fecce30e3abad582ed5b21 2024-12-17T12:37:43,617 INFO [StoreOpener-0747a0e153fecce30e3abad582ed5b21-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0747a0e153fecce30e3abad582ed5b21 2024-12-17T12:37:43,620 INFO [StoreOpener-0747a0e153fecce30e3abad582ed5b21-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0747a0e153fecce30e3abad582ed5b21 columnFamilyName info 2024-12-17T12:37:43,620 DEBUG [StoreOpener-0747a0e153fecce30e3abad582ed5b21-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:43,621 INFO [StoreOpener-0747a0e153fecce30e3abad582ed5b21-1 {}] regionserver.HStore(327): Store=0747a0e153fecce30e3abad582ed5b21/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:37:43,623 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/namespace/0747a0e153fecce30e3abad582ed5b21 2024-12-17T12:37:43,624 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/namespace/0747a0e153fecce30e3abad582ed5b21 2024-12-17T12:37:43,628 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 0747a0e153fecce30e3abad582ed5b21 2024-12-17T12:37:43,631 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/namespace/0747a0e153fecce30e3abad582ed5b21/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-17T12:37:43,632 INFO [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 0747a0e153fecce30e3abad582ed5b21; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60261739, jitterRate=-0.10203011333942413}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-17T12:37:43,634 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 0747a0e153fecce30e3abad582ed5b21: 2024-12-17T12:37:43,636 INFO [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21., pid=6, masterSystemTime=1734439063602 2024-12-17T12:37:43,640 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 2024-12-17T12:37:43,640 INFO [RS_OPEN_PRIORITY_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 
2024-12-17T12:37:43,641 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=0747a0e153fecce30e3abad582ed5b21, regionState=OPEN, openSeqNum=2, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:43,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-17T12:37:43,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 0747a0e153fecce30e3abad582ed5b21, server=681c08bfdbdf,36491,1734439058372 in 199 msec 2024-12-17T12:37:43,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-17T12:37:43,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=0747a0e153fecce30e3abad582ed5b21, ASSIGN in 370 msec 2024-12-17T12:37:43,654 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-17T12:37:43,655 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439063654"}]},"ts":"1734439063654"} 2024-12-17T12:37:43,657 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-17T12:37:43,704 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-17T12:37:43,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3710 sec 2024-12-17T12:37:43,744 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-17T12:37:43,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-17T12:37:43,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:43,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:37:43,784 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-17T12:37:43,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-17T12:37:43,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 34 msec 2024-12-17T12:37:43,829 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-17T12:37:43,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-17T12:37:43,861 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 31 msec 2024-12-17T12:37:43,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-17T12:37:43,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-17T12:37:43,911 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.249sec 2024-12-17T12:37:43,914 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-17T12:37:43,917 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-17T12:37:43,919 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-17T12:37:43,920 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-17T12:37:43,920 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-17T12:37:43,922 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=681c08bfdbdf,38693,1734439057670-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-17T12:37:43,923 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=681c08bfdbdf,38693,1734439057670-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-17T12:37:43,929 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-17T12:37:43,929 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-17T12:37:43,929 INFO [master/681c08bfdbdf:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=681c08bfdbdf,38693,1734439057670-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-17T12:37:43,933 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47f9cd1b to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79fca1e8 2024-12-17T12:37:43,933 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-17T12:37:43,945 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a4c748c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:43,948 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-17T12:37:43,948 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-17T12:37:43,957 DEBUG [hconnection-0x1c37e860-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:37:43,966 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35850, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:43,974 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=681c08bfdbdf,38693,1734439057670 2024-12-17T12:37:43,986 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=442, MaxFileDescriptor=1048576, SystemLoadAverage=180, ProcessCount=11, AvailableMemoryMB=4457 2024-12-17T12:37:43,996 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-17T12:37:43,999 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47732, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-17T12:37:44,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
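The TableDescriptorChecker warning just above reports a memstore flush size of 131072 bytes (128 KB), far below the usual 128 MB default; the value can come either from MEMSTORE_FLUSHSIZE on a table descriptor or from hbase.hregion.memstore.flush.size, and a test run like this one presumably lowers it on purpose to force frequent flushes. A minimal, purely illustrative sketch of setting it through the client configuration follows; it is an assumption about how such a value could be supplied, not the actual test setup code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallFlushSizeConfig {
    // Builds a configuration with a deliberately tiny region flush size,
    // matching the 131072-byte value reported by TableDescriptorChecker above.
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 131072L);
        return conf;
    }
}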
2024-12-17T12:37:44,022 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-17T12:37:44,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-17T12:37:44,027 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-17T12:37:44,028 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-17T12:37:44,028 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:44,031 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-17T12:37:44,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-17T12:37:44,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741837_1013 (size=963) 2024-12-17T12:37:44,049 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9 2024-12-17T12:37:44,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741838_1014 (size=53) 2024-12-17T12:37:44,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-17T12:37:44,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-17T12:37:44,464 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:37:44,464 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 502e77060db097ea5decbe44e66ef8e7, disabling compactions & flushes 2024-12-17T12:37:44,464 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:44,464 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:44,464 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. after waiting 0 ms 2024-12-17T12:37:44,464 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:44,464 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:44,464 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:44,466 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-17T12:37:44,466 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734439064466"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734439064466"}]},"ts":"1734439064466"} 2024-12-17T12:37:44,469 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
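The HMaster create entry above spells out the full TestAcidGuarantees descriptor: the table-level property hbase.hregion.compacting.memstore.type set to ADAPTIVE plus three identical column families A, B and C with VERSIONS => '1' and a 64 KB block size. As a rough illustration only, the same descriptor could be expressed through the HBase 2.x client API roughly as below; the test itself builds the table through its own utility code, so the class name and connection handling here are assumptions.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTable {
    public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder tdb = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // TABLE_ATTRIBUTES => METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)     // VERSIONS => '1'
                    .setBlocksize(65536)   // BLOCKSIZE => '65536 B (64KB)'
                    .build());
            }
            admin.createTable(tdb.build());
        }
    }
}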
2024-12-17T12:37:44,470 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-17T12:37:44,470 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439064470"}]},"ts":"1734439064470"} 2024-12-17T12:37:44,473 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-17T12:37:44,527 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=502e77060db097ea5decbe44e66ef8e7, ASSIGN}] 2024-12-17T12:37:44,531 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=502e77060db097ea5decbe44e66ef8e7, ASSIGN 2024-12-17T12:37:44,532 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=502e77060db097ea5decbe44e66ef8e7, ASSIGN; state=OFFLINE, location=681c08bfdbdf,36491,1734439058372; forceNewPlan=false, retain=false 2024-12-17T12:37:44,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-17T12:37:44,683 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=502e77060db097ea5decbe44e66ef8e7, regionState=OPENING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:44,689 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:37:44,846 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:44,857 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:44,858 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:37:44,858 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:44,858 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:37:44,858 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:44,858 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:44,861 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:44,864 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:37:44,864 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 502e77060db097ea5decbe44e66ef8e7 columnFamilyName A 2024-12-17T12:37:44,864 DEBUG [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:44,866 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] regionserver.HStore(327): Store=502e77060db097ea5decbe44e66ef8e7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:37:44,866 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:44,868 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:37:44,869 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 502e77060db097ea5decbe44e66ef8e7 columnFamilyName B 2024-12-17T12:37:44,869 DEBUG [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:44,870 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] regionserver.HStore(327): Store=502e77060db097ea5decbe44e66ef8e7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:37:44,870 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:44,872 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:37:44,872 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 502e77060db097ea5decbe44e66ef8e7 columnFamilyName C 2024-12-17T12:37:44,872 DEBUG [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:37:44,873 INFO [StoreOpener-502e77060db097ea5decbe44e66ef8e7-1 {}] regionserver.HStore(327): Store=502e77060db097ea5decbe44e66ef8e7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:37:44,873 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:44,874 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:44,875 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:44,877 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-17T12:37:44,879 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:44,882 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-17T12:37:44,883 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 502e77060db097ea5decbe44e66ef8e7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63743284, jitterRate=-0.050151050090789795}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-17T12:37:44,883 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:44,885 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., pid=11, masterSystemTime=1734439064846 2024-12-17T12:37:44,887 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:44,887 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
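With the TestAcidGuarantees region open, the burst of ZooKeeper and RPC client connections that follows corresponds to the test spinning up its writer and reader threads; the flush activity later in the log shows keys such as test_row_0/A:col10, i.e. each writer updates the same row across all three families in a single Put, which HBase applies atomically per row. A hedged sketch of such a write is shown below; the value payload and connection handling are illustrative assumptions, not the test's actual code.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicMultiFamilyWrite {
    public static void main(String[] args) throws IOException {
        byte[] value = Bytes.toBytes("some-value");
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // A single Put touching families A, B and C of the same row is
            // applied atomically by the region server, which is the property
            // the acid-guarantees test exercises.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
            table.put(put);
        }
    }
}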
2024-12-17T12:37:44,888 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=502e77060db097ea5decbe44e66ef8e7, regionState=OPEN, openSeqNum=2, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:44,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-17T12:37:44,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 in 202 msec 2024-12-17T12:37:44,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-17T12:37:44,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=502e77060db097ea5decbe44e66ef8e7, ASSIGN in 367 msec 2024-12-17T12:37:44,898 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-17T12:37:44,899 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439064898"}]},"ts":"1734439064898"} 2024-12-17T12:37:44,901 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-17T12:37:44,947 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-17T12:37:44,952 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 925 msec 2024-12-17T12:37:45,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-17T12:37:45,156 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-17T12:37:45,164 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x307f6610 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11dd7d0a 2024-12-17T12:37:45,174 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bbe9db9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:45,176 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:37:45,178 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35862, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:45,182 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-17T12:37:45,184 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47734, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-17T12:37:45,192 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x336619a6 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d922277 2024-12-17T12:37:45,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ff08b0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:45,203 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62083522 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5eb5228e 2024-12-17T12:37:45,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32acedd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:45,212 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x035ff1ca to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@614c73a1 2024-12-17T12:37:45,225 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34dbd83c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:45,226 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ddb4a72 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f08fb13 2024-12-17T12:37:45,237 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e0836d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:45,238 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49b4be90 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@122b664b 2024-12-17T12:37:45,250 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34767488, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:45,253 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c7b4e84 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6237f435 2024-12-17T12:37:45,267 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@547baa7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:45,270 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c5c7344 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5197a8a6 2024-12-17T12:37:45,283 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58b6c0b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:45,285 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3909b87d to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@57d44cac 2024-12-17T12:37:45,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e566f95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:45,296 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0072c84e to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@753655c0 2024-12-17T12:37:45,308 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5831a634, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:37:45,316 DEBUG [hconnection-0x4ee426b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:37:45,317 DEBUG [hconnection-0x1c8a5105-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:37:45,317 DEBUG [hconnection-0x74328c80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:37:45,317 DEBUG [hconnection-0x7da3ac9b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:37:45,317 DEBUG [hconnection-0x2522abb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:37:45,319 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35874, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:45,319 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35890, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:45,319 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35868, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:45,321 DEBUG [hconnection-0x4c21a880-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-12-17T12:37:45,322 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35902, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:45,322 DEBUG [hconnection-0x62bc4cf2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:37:45,324 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35916, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:45,324 DEBUG [hconnection-0x79bb3058-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:37:45,326 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:37:45,326 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35918, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:45,328 DEBUG [hconnection-0x3a31c907-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:37:45,332 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35922, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:45,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-17T12:37:45,335 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:37:45,335 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35932, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:45,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-17T12:37:45,338 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:37:45,340 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:37:45,344 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35936, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:37:45,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:45,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-17T12:37:45,402 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:45,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:45,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:45,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:45,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:45,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:45,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-17T12:37:45,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/0bc642b72715464cac701a29186bd621 is 50, key is test_row_0/A:col10/1734439065369/Put/seqid=0 2024-12-17T12:37:45,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439125489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,501 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439125489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-17T12:37:45,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439125496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:45,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:45,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:45,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:45,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439125498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439125496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:45,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741839_1015 (size=12001) 2024-12-17T12:37:45,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/0bc642b72715464cac701a29186bd621 2024-12-17T12:37:45,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:45,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-17T12:37:45,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439125669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439125670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439125670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/8b9aa4f0809f436e9f18950d2401ebf8 is 50, key is test_row_0/B:col10/1734439065369/Put/seqid=0 2024-12-17T12:37:45,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439125673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439125673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741840_1016 (size=12001) 2024-12-17T12:37:45,699 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/8b9aa4f0809f436e9f18950d2401ebf8 2024-12-17T12:37:45,727 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-17T12:37:45,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:45,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:45,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:45,740 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:45,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:45,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/eb9ed69984714c159a7cf366fcd86e56 is 50, key is test_row_0/C:col10/1734439065369/Put/seqid=0 2024-12-17T12:37:45,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741841_1017 (size=12001) 2024-12-17T12:37:45,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/eb9ed69984714c159a7cf366fcd86e56 2024-12-17T12:37:45,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/0bc642b72715464cac701a29186bd621 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/0bc642b72715464cac701a29186bd621 2024-12-17T12:37:45,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/0bc642b72715464cac701a29186bd621, entries=150, sequenceid=14, filesize=11.7 K 2024-12-17T12:37:45,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/8b9aa4f0809f436e9f18950d2401ebf8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8b9aa4f0809f436e9f18950d2401ebf8 2024-12-17T12:37:45,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8b9aa4f0809f436e9f18950d2401ebf8, entries=150, sequenceid=14, filesize=11.7 K 2024-12-17T12:37:45,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/eb9ed69984714c159a7cf366fcd86e56 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/eb9ed69984714c159a7cf366fcd86e56 2024-12-17T12:37:45,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/eb9ed69984714c159a7cf366fcd86e56, entries=150, sequenceid=14, filesize=11.7 K 2024-12-17T12:37:45,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 502e77060db097ea5decbe44e66ef8e7 in 448ms, sequenceid=14, compaction requested=false 2024-12-17T12:37:45,844 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-17T12:37:45,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:45,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-17T12:37:45,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:45,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:45,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:45,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:45,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:45,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:45,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/21b23c9c54c74687b7c662728c32a240 is 50, key is test_row_0/A:col10/1734439065877/Put/seqid=0 2024-12-17T12:37:45,895 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-17T12:37:45,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:45,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:45,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:45,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:45,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:45,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:45,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741842_1018 (size=14341) 2024-12-17T12:37:45,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/21b23c9c54c74687b7c662728c32a240 2024-12-17T12:37:45,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439125912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439125914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439125917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439125921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:45,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439125917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:45,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/ecbb4e16208845bc9b9d512d040c7ef2 is 50, key is test_row_0/B:col10/1734439065877/Put/seqid=0 2024-12-17T12:37:45,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-17T12:37:45,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741843_1019 (size=12001) 2024-12-17T12:37:45,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/ecbb4e16208845bc9b9d512d040c7ef2 2024-12-17T12:37:45,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/a47c60cb4af8431db47a30a09c2feba5 is 50, key is test_row_0/C:col10/1734439065877/Put/seqid=0 2024-12-17T12:37:45,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741844_1020 (size=12001) 2024-12-17T12:37:46,001 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/a47c60cb4af8431db47a30a09c2feba5 2024-12-17T12:37:46,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/21b23c9c54c74687b7c662728c32a240 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/21b23c9c54c74687b7c662728c32a240 2024-12-17T12:37:46,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/21b23c9c54c74687b7c662728c32a240, entries=200, sequenceid=38, filesize=14.0 K 2024-12-17T12:37:46,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439126033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439126035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439126035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439126035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/ecbb4e16208845bc9b9d512d040c7ef2 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ecbb4e16208845bc9b9d512d040c7ef2 2024-12-17T12:37:46,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439126038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ecbb4e16208845bc9b9d512d040c7ef2, entries=150, sequenceid=38, filesize=11.7 K 2024-12-17T12:37:46,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/a47c60cb4af8431db47a30a09c2feba5 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/a47c60cb4af8431db47a30a09c2feba5 2024-12-17T12:37:46,058 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-17T12:37:46,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:46,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:46,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:46,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:46,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:46,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:46,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/a47c60cb4af8431db47a30a09c2feba5, entries=150, sequenceid=38, filesize=11.7 K 2024-12-17T12:37:46,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 502e77060db097ea5decbe44e66ef8e7 in 190ms, sequenceid=38, compaction requested=false 2024-12-17T12:37:46,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:46,213 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-17T12:37:46,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
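The stretch of log above captures HBase's memstore back-pressure during the TestAcidGuarantees run: while MemStoreFlusher.0 writes the A, B and C column-family snapshots of region 502e77060db097ea5decbe44e66ef8e7 out to HDFS, HRegion.checkResources rejects incoming Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K", a limit far below production defaults, consistent with the test configuring a tiny flush size to provoke back-pressure quickly), and the remotely dispatched FlushRegionCallable for pid=13 fails with "Unable to complete flush" because the region reports it is already flushing. The master then re-dispatches the procedure (the 12:37:46,213-215 entries just above), and the flush of all three column families proceeds immediately below. Clients are expected to treat RegionTooBusyException as a transient condition and retry the write. The snippet that follows is a minimal, hypothetical sketch of such an application-level retry around a single put, reusing the table, row and column names that appear in this log; it assumes the exception surfaces to the caller, whereas the stock HBase client also retries internally and may instead report a retries-exhausted error once its own attempts run out.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row key, family and qualifier taken from the flushed cells in this log
                // (key is test_row_0/A:col10/...); the value is a placeholder.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

                int attempts = 0;
                while (true) {
                    try {
                        table.put(put);
                        break;                          // write accepted
                    } catch (RegionTooBusyException e) {
                        // Region is over its memstore limit; back off and retry
                        // while the MemStoreFlusher drains it.
                        if (++attempts >= 10) {
                            throw e;                    // give up after a bounded number of tries
                        }
                        Thread.sleep(100L * attempts);
                    }
                }
            }
        }
    }

In the captured log no such loop is visible directly; the same client connections (172.17.0.2:35902, 35916, 35918, 35922, 35932) simply keep issuing Mutate calls with increasing callIds while the flush drains, which is consistent with retry behaviour on the client side.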
2024-12-17T12:37:46,215 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-17T12:37:46,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:46,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:46,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:46,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:46,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:46,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:46,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/150079d0e09848c4ab576b7152782072 is 50, key is test_row_0/A:col10/1734439065895/Put/seqid=0 2024-12-17T12:37:46,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:46,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:46,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741845_1021 (size=12001) 2024-12-17T12:37:46,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439126325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439126325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439126326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439126328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439126328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439126436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439126440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439126440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439126440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439126444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-17T12:37:46,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439126643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439126645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439126646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439126647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439126647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,660 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/150079d0e09848c4ab576b7152782072 2024-12-17T12:37:46,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/b5066111b3a541558cdd1b73081771ba is 50, key is test_row_0/B:col10/1734439065895/Put/seqid=0 2024-12-17T12:37:46,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741846_1022 (size=12001) 2024-12-17T12:37:46,859 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-17T12:37:46,860 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-17T12:37:46,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439126949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439126949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439126950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439126952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:46,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:46,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439126956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,100 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/b5066111b3a541558cdd1b73081771ba 2024-12-17T12:37:47,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/454b8cbd62df49d287c823cb751a7d9b is 50, key is test_row_0/C:col10/1734439065895/Put/seqid=0 2024-12-17T12:37:47,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741847_1023 (size=12001) 2024-12-17T12:37:47,149 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/454b8cbd62df49d287c823cb751a7d9b 2024-12-17T12:37:47,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/150079d0e09848c4ab576b7152782072 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/150079d0e09848c4ab576b7152782072 2024-12-17T12:37:47,179 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/150079d0e09848c4ab576b7152782072, entries=150, sequenceid=51, filesize=11.7 K 2024-12-17T12:37:47,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/b5066111b3a541558cdd1b73081771ba as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/b5066111b3a541558cdd1b73081771ba 2024-12-17T12:37:47,201 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/b5066111b3a541558cdd1b73081771ba, entries=150, sequenceid=51, filesize=11.7 K 2024-12-17T12:37:47,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/454b8cbd62df49d287c823cb751a7d9b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/454b8cbd62df49d287c823cb751a7d9b 2024-12-17T12:37:47,219 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/454b8cbd62df49d287c823cb751a7d9b, entries=150, sequenceid=51, filesize=11.7 K 2024-12-17T12:37:47,220 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 502e77060db097ea5decbe44e66ef8e7 in 1005ms, sequenceid=51, compaction requested=true 2024-12-17T12:37:47,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:47,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:47,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-17T12:37:47,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-17T12:37:47,228 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-17T12:37:47,228 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8830 sec 2024-12-17T12:37:47,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.9250 sec 2024-12-17T12:37:47,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-17T12:37:47,450 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-17T12:37:47,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:37:47,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-17T12:37:47,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-17T12:37:47,456 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:37:47,459 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:37:47,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:47,459 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:37:47,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-17T12:37:47,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:47,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:47,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:47,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-17T12:37:47,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:47,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:47,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/19961d3360f94cb280f28249213e1ac8 is 50, key is test_row_0/A:col10/1734439066324/Put/seqid=0 2024-12-17T12:37:47,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439127470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439127473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439127473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439127475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439127475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741848_1024 (size=12001) 2024-12-17T12:37:47,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/19961d3360f94cb280f28249213e1ac8 
2024-12-17T12:37:47,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/04d8f0355ebd423fb6c17594755de32e is 50, key is test_row_0/B:col10/1734439066324/Put/seqid=0 2024-12-17T12:37:47,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741849_1025 (size=12001) 2024-12-17T12:37:47,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/04d8f0355ebd423fb6c17594755de32e 2024-12-17T12:37:47,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-17T12:37:47,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/6d9bc5840abf42859b824d5963337d04 is 50, key is test_row_0/C:col10/1734439066324/Put/seqid=0 2024-12-17T12:37:47,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439127581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439127582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439127584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439127583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439127584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,612 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-17T12:37:47,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:47,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:47,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:47,613 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:47,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:47,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:47,616 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-17T12:37:47,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741850_1026 (size=12001) 2024-12-17T12:37:47,621 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/6d9bc5840abf42859b824d5963337d04 2024-12-17T12:37:47,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/19961d3360f94cb280f28249213e1ac8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/19961d3360f94cb280f28249213e1ac8 2024-12-17T12:37:47,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/19961d3360f94cb280f28249213e1ac8, entries=150, sequenceid=75, filesize=11.7 K 2024-12-17T12:37:47,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/04d8f0355ebd423fb6c17594755de32e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/04d8f0355ebd423fb6c17594755de32e 2024-12-17T12:37:47,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/04d8f0355ebd423fb6c17594755de32e, entries=150, sequenceid=75, filesize=11.7 K 2024-12-17T12:37:47,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/6d9bc5840abf42859b824d5963337d04 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/6d9bc5840abf42859b824d5963337d04 2024-12-17T12:37:47,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/6d9bc5840abf42859b824d5963337d04, entries=150, sequenceid=75, filesize=11.7 K 2024-12-17T12:37:47,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 
502e77060db097ea5decbe44e66ef8e7 in 249ms, sequenceid=75, compaction requested=true 2024-12-17T12:37:47,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:47,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:37:47,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:47,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:37:47,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:47,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:37:47,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:47,711 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:37:47,711 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:37:47,716 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:37:47,719 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/B is initiating minor compaction (all files) 2024-12-17T12:37:47,719 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/B in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:47,719 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8b9aa4f0809f436e9f18950d2401ebf8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ecbb4e16208845bc9b9d512d040c7ef2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/b5066111b3a541558cdd1b73081771ba, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/04d8f0355ebd423fb6c17594755de32e] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=46.9 K 2024-12-17T12:37:47,721 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b9aa4f0809f436e9f18950d2401ebf8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734439065367 2024-12-17T12:37:47,722 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting ecbb4e16208845bc9b9d512d040c7ef2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734439065462 2024-12-17T12:37:47,722 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting b5066111b3a541558cdd1b73081771ba, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734439065895 2024-12-17T12:37:47,725 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 04d8f0355ebd423fb6c17594755de32e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734439066317 2024-12-17T12:37:47,729 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50344 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:37:47,729 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/A is initiating minor compaction (all files) 2024-12-17T12:37:47,730 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/A in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:47,730 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/0bc642b72715464cac701a29186bd621, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/21b23c9c54c74687b7c662728c32a240, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/150079d0e09848c4ab576b7152782072, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/19961d3360f94cb280f28249213e1ac8] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=49.2 K 2024-12-17T12:37:47,740 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bc642b72715464cac701a29186bd621, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734439065367 2024-12-17T12:37:47,742 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21b23c9c54c74687b7c662728c32a240, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734439065462 2024-12-17T12:37:47,744 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 150079d0e09848c4ab576b7152782072, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734439065895 2024-12-17T12:37:47,746 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19961d3360f94cb280f28249213e1ac8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734439066317 2024-12-17T12:37:47,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-17T12:37:47,767 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-17T12:37:47,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:47,768 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-17T12:37:47,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:47,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:47,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:47,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:47,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:47,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:47,796 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#A#compaction#13 average throughput is 0.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:47,800 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#B#compaction#12 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:47,801 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/216bfe23dabc45daade9d26eadfe9a5a is 50, key is test_row_0/B:col10/1734439066324/Put/seqid=0 2024-12-17T12:37:47,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/87b0c6d178b34880b577873124dff88d is 50, key is test_row_0/A:col10/1734439067470/Put/seqid=0 2024-12-17T12:37:47,804 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/b43758a7a76d476984e03720925c0120 is 50, key is test_row_0/A:col10/1734439066324/Put/seqid=0 2024-12-17T12:37:47,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741851_1027 (size=12139) 2024-12-17T12:37:47,823 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/216bfe23dabc45daade9d26eadfe9a5a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/216bfe23dabc45daade9d26eadfe9a5a 2024-12-17T12:37:47,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741852_1028 (size=12001) 2024-12-17T12:37:47,835 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/87b0c6d178b34880b577873124dff88d 2024-12-17T12:37:47,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:47,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:47,842 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/B of 502e77060db097ea5decbe44e66ef8e7 into 216bfe23dabc45daade9d26eadfe9a5a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:47,844 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:47,844 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/B, priority=12, startTime=1734439067711; duration=0sec 2024-12-17T12:37:47,845 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:47,845 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:B 2024-12-17T12:37:47,845 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:37:47,850 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:37:47,850 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/C is initiating minor compaction (all files) 2024-12-17T12:37:47,850 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/C in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:47,850 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/eb9ed69984714c159a7cf366fcd86e56, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/a47c60cb4af8431db47a30a09c2feba5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/454b8cbd62df49d287c823cb751a7d9b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/6d9bc5840abf42859b824d5963337d04] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=46.9 K 2024-12-17T12:37:47,852 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting eb9ed69984714c159a7cf366fcd86e56, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734439065367 2024-12-17T12:37:47,853 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting a47c60cb4af8431db47a30a09c2feba5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734439065462 2024-12-17T12:37:47,854 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 454b8cbd62df49d287c823cb751a7d9b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=51, earliestPutTs=1734439065895 2024-12-17T12:37:47,854 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d9bc5840abf42859b824d5963337d04, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734439066317 2024-12-17T12:37:47,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741853_1029 (size=12139) 2024-12-17T12:37:47,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/760d8912c1c943f8b0b8b37cbf5c2d05 is 50, key is test_row_0/B:col10/1734439067470/Put/seqid=0 2024-12-17T12:37:47,876 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/b43758a7a76d476984e03720925c0120 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/b43758a7a76d476984e03720925c0120 2024-12-17T12:37:47,891 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/A of 502e77060db097ea5decbe44e66ef8e7 into b43758a7a76d476984e03720925c0120(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:37:47,891 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:47,891 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/A, priority=12, startTime=1734439067710; duration=0sec 2024-12-17T12:37:47,892 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:47,892 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:A 2024-12-17T12:37:47,904 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#C#compaction#16 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:47,905 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/725ed8e6c3384b83961e2c7a74485b92 is 50, key is test_row_0/C:col10/1734439066324/Put/seqid=0 2024-12-17T12:37:47,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741854_1030 (size=12001) 2024-12-17T12:37:47,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741855_1031 (size=12139) 2024-12-17T12:37:47,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439127921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439127933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,945 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/725ed8e6c3384b83961e2c7a74485b92 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/725ed8e6c3384b83961e2c7a74485b92 2024-12-17T12:37:47,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439127937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439127937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:47,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439127938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:47,959 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/C of 502e77060db097ea5decbe44e66ef8e7 into 725ed8e6c3384b83961e2c7a74485b92(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:37:47,959 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:47,959 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/C, priority=12, startTime=1734439067711; duration=0sec 2024-12-17T12:37:47,959 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:47,959 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:C 2024-12-17T12:37:48,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439128039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439128041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439128048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439128050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439128050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-17T12:37:48,069 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-17T12:37:48,069 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-17T12:37:48,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-17T12:37:48,071 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-17T12:37:48,073 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-17T12:37:48,073 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-17T12:37:48,073 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-17T12:37:48,073 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-17T12:37:48,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-17T12:37:48,075 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-17T12:37:48,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439128244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439128247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439128253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439128255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439128258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,310 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/760d8912c1c943f8b0b8b37cbf5c2d05 2024-12-17T12:37:48,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/ff3a685f36d84e76b1f5c970db56e2be is 50, key is test_row_0/C:col10/1734439067470/Put/seqid=0 2024-12-17T12:37:48,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741856_1032 (size=12001) 2024-12-17T12:37:48,371 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/ff3a685f36d84e76b1f5c970db56e2be 2024-12-17T12:37:48,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/87b0c6d178b34880b577873124dff88d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/87b0c6d178b34880b577873124dff88d 2024-12-17T12:37:48,395 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/87b0c6d178b34880b577873124dff88d, entries=150, sequenceid=87, filesize=11.7 K 2024-12-17T12:37:48,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/760d8912c1c943f8b0b8b37cbf5c2d05 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/760d8912c1c943f8b0b8b37cbf5c2d05 2024-12-17T12:37:48,413 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/760d8912c1c943f8b0b8b37cbf5c2d05, entries=150, sequenceid=87, filesize=11.7 K 2024-12-17T12:37:48,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/ff3a685f36d84e76b1f5c970db56e2be as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ff3a685f36d84e76b1f5c970db56e2be 2024-12-17T12:37:48,430 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ff3a685f36d84e76b1f5c970db56e2be, entries=150, sequenceid=87, filesize=11.7 K 2024-12-17T12:37:48,433 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 502e77060db097ea5decbe44e66ef8e7 in 664ms, sequenceid=87, compaction requested=false 2024-12-17T12:37:48,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:48,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:48,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-17T12:37:48,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-17T12:37:48,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-17T12:37:48,444 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 981 msec 2024-12-17T12:37:48,448 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 992 msec 2024-12-17T12:37:48,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:48,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-17T12:37:48,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:48,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:48,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:48,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:48,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:48,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:48,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-17T12:37:48,561 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-17T12:37:48,564 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:37:48,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/5599beca86684aedbd8e953af032c03a is 50, key is test_row_0/A:col10/1734439068548/Put/seqid=0 2024-12-17T12:37:48,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-17T12:37:48,569 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:37:48,569 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-17T12:37:48,571 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:37:48,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:37:48,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439128565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439128571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439128572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439128572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439128573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741857_1033 (size=14341) 2024-12-17T12:37:48,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-17T12:37:48,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439128675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439128681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439128681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439128681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439128706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,724 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-17T12:37:48,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:48,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:48,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:48,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:48,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:48,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:48,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-17T12:37:48,879 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-17T12:37:48,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:48,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:48,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:48,880 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:48,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:48,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:48,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439128880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439128886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439128886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439128887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:48,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:48,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439128909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/5599beca86684aedbd8e953af032c03a 2024-12-17T12:37:49,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/74757e060b1c4500a25caaf7fc6d81b7 is 50, key is test_row_0/B:col10/1734439068548/Put/seqid=0 2024-12-17T12:37:49,033 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-17T12:37:49,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:49,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,035 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741858_1034 (size=12001) 2024-12-17T12:37:49,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/74757e060b1c4500a25caaf7fc6d81b7 2024-12-17T12:37:49,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/d4391d361e6e443e858b8cb0c0cb6701 is 50, key is test_row_0/C:col10/1734439068548/Put/seqid=0 2024-12-17T12:37:49,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741859_1035 (size=12001) 2024-12-17T12:37:49,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/d4391d361e6e443e858b8cb0c0cb6701 2024-12-17T12:37:49,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/5599beca86684aedbd8e953af032c03a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5599beca86684aedbd8e953af032c03a 2024-12-17T12:37:49,114 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5599beca86684aedbd8e953af032c03a, entries=200, sequenceid=115, filesize=14.0 K 2024-12-17T12:37:49,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/74757e060b1c4500a25caaf7fc6d81b7 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/74757e060b1c4500a25caaf7fc6d81b7 2024-12-17T12:37:49,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/74757e060b1c4500a25caaf7fc6d81b7, entries=150, sequenceid=115, filesize=11.7 K 2024-12-17T12:37:49,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/d4391d361e6e443e858b8cb0c0cb6701 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d4391d361e6e443e858b8cb0c0cb6701 2024-12-17T12:37:49,147 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d4391d361e6e443e858b8cb0c0cb6701, entries=150, sequenceid=115, filesize=11.7 K 2024-12-17T12:37:49,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 502e77060db097ea5decbe44e66ef8e7 in 598ms, sequenceid=115, compaction requested=true 2024-12-17T12:37:49,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:49,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:37:49,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:49,149 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:49,149 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:49,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:37:49,153 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-12-17T12:37:49,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:49,153 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/B is initiating minor compaction (all files) 2024-12-17T12:37:49,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:37:49,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:49,153 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/B in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,153 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/216bfe23dabc45daade9d26eadfe9a5a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/760d8912c1c943f8b0b8b37cbf5c2d05, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/74757e060b1c4500a25caaf7fc6d81b7] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=35.3 K 2024-12-17T12:37:49,154 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:49,154 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/A is initiating minor compaction (all files) 2024-12-17T12:37:49,154 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/A in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
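
At this point the flush has completed (~147.60 KB written across stores A, B and C at sequenceid=115, "compaction requested=true") and CompactSplit has queued minor compactions, with ExploringCompactionPolicy selecting three HFiles per store. Those operations are driven automatically by MemStoreFlusher and the compaction threads; purely as an illustration, the equivalent manual requests through the public Admin API would look roughly like the sketch below. Only the table name comes from the log; the connection setup is an assumption.

// Illustrative sketch only: manually requesting the flush and minor
// compaction that MemStoreFlusher and CompactSplit perform automatically
// in this test run.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlushAndCompact {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Write out memstore contents as new HFiles, like the flush at sequenceid=115.
      admin.flush(table);
      // Ask for a compaction; small adjacent HFiles per store are merged,
      // as the ExploringCompactionPolicy selection above does for A and B.
      admin.compact(table);
    }
  }
}
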
2024-12-17T12:37:49,154 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/b43758a7a76d476984e03720925c0120, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/87b0c6d178b34880b577873124dff88d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5599beca86684aedbd8e953af032c03a] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=37.6 K 2024-12-17T12:37:49,154 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 216bfe23dabc45daade9d26eadfe9a5a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734439066317 2024-12-17T12:37:49,155 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 760d8912c1c943f8b0b8b37cbf5c2d05, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1734439067467 2024-12-17T12:37:49,155 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting b43758a7a76d476984e03720925c0120, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734439066317 2024-12-17T12:37:49,155 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87b0c6d178b34880b577873124dff88d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1734439067467 2024-12-17T12:37:49,156 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5599beca86684aedbd8e953af032c03a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734439067918 2024-12-17T12:37:49,160 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 74757e060b1c4500a25caaf7fc6d81b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734439067918 2024-12-17T12:37:49,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-17T12:37:49,182 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#A#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:49,183 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/5758a35fd363413f9cbef2f0e4acbe8b is 50, key is test_row_0/A:col10/1734439068548/Put/seqid=0 2024-12-17T12:37:49,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:49,187 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-17T12:37:49,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:49,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:49,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:49,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:49,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:49,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:49,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-17T12:37:49,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:49,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,193 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#B#compaction#22 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:49,194 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/0d8a07a1f3184c388f94dea2cfc8a209 is 50, key is test_row_0/B:col10/1734439068548/Put/seqid=0 2024-12-17T12:37:49,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/90999d8e166d4e41b5cedbd8bc188ca0 is 50, key is test_row_0/A:col10/1734439069185/Put/seqid=0 2024-12-17T12:37:49,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741861_1037 (size=12241) 2024-12-17T12:37:49,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439129229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439129234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439129235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439129236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439129236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741860_1036 (size=12241) 2024-12-17T12:37:49,260 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/5758a35fd363413f9cbef2f0e4acbe8b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5758a35fd363413f9cbef2f0e4acbe8b 2024-12-17T12:37:49,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741862_1038 (size=12001) 2024-12-17T12:37:49,276 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/A of 502e77060db097ea5decbe44e66ef8e7 into 5758a35fd363413f9cbef2f0e4acbe8b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:49,276 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:49,276 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/A, priority=13, startTime=1734439069149; duration=0sec 2024-12-17T12:37:49,276 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:49,276 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:A 2024-12-17T12:37:49,278 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:49,280 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:49,280 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/C is initiating minor compaction (all files) 2024-12-17T12:37:49,280 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/C in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,281 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/725ed8e6c3384b83961e2c7a74485b92, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ff3a685f36d84e76b1f5c970db56e2be, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d4391d361e6e443e858b8cb0c0cb6701] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=35.3 K 2024-12-17T12:37:49,281 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 725ed8e6c3384b83961e2c7a74485b92, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734439066317 2024-12-17T12:37:49,282 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff3a685f36d84e76b1f5c970db56e2be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1734439067467 2024-12-17T12:37:49,283 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4391d361e6e443e858b8cb0c0cb6701, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734439067918 2024-12-17T12:37:49,300 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#C#compaction#24 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:49,301 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/44481a16d4f64e3f8d5452eaa8098926 is 50, key is test_row_0/C:col10/1734439068548/Put/seqid=0 2024-12-17T12:37:49,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741863_1039 (size=12241) 2024-12-17T12:37:49,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439129338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,342 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-17T12:37:49,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
as already flushing 2024-12-17T12:37:49,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439129344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:49,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439129345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439129345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439129346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,498 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,498 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-17T12:37:49,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:49,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:49,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,498 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:49,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439129541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439129548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439129553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439129555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439129555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,620 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/0d8a07a1f3184c388f94dea2cfc8a209 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/0d8a07a1f3184c388f94dea2cfc8a209 2024-12-17T12:37:49,632 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/B of 502e77060db097ea5decbe44e66ef8e7 into 0d8a07a1f3184c388f94dea2cfc8a209(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:49,632 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:49,632 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/B, priority=13, startTime=1734439069149; duration=0sec 2024-12-17T12:37:49,632 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:49,632 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:B 2024-12-17T12:37:49,651 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-17T12:37:49,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:49,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:49,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/90999d8e166d4e41b5cedbd8bc188ca0 2024-12-17T12:37:49,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-17T12:37:49,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/57968acc142d40488916b94031faee6a is 50, key is test_row_0/B:col10/1734439069185/Put/seqid=0 2024-12-17T12:37:49,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741864_1040 (size=12001) 2024-12-17T12:37:49,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/57968acc142d40488916b94031faee6a 2024-12-17T12:37:49,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/939be3926f9842c0845cb6ae7b0f734a is 50, key is test_row_0/C:col10/1734439069185/Put/seqid=0 2024-12-17T12:37:49,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741865_1041 (size=12001) 2024-12-17T12:37:49,729 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/44481a16d4f64e3f8d5452eaa8098926 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/44481a16d4f64e3f8d5452eaa8098926 2024-12-17T12:37:49,741 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/C of 502e77060db097ea5decbe44e66ef8e7 into 44481a16d4f64e3f8d5452eaa8098926(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:49,741 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:49,742 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/C, priority=13, startTime=1734439069153; duration=0sec 2024-12-17T12:37:49,742 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:49,742 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:C 2024-12-17T12:37:49,804 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,805 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-17T12:37:49,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:49,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:49,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439129846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439129857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439129857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439129859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:49,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439129862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,959 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:49,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-17T12:37:49,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:49,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:49,960 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:49,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:49,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:50,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/939be3926f9842c0845cb6ae7b0f734a 2024-12-17T12:37:50,114 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-17T12:37:50,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:50,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:50,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:50,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:50,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:50,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:50,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/90999d8e166d4e41b5cedbd8bc188ca0 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/90999d8e166d4e41b5cedbd8bc188ca0 2024-12-17T12:37:50,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/90999d8e166d4e41b5cedbd8bc188ca0, entries=150, sequenceid=127, filesize=11.7 K 2024-12-17T12:37:50,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/57968acc142d40488916b94031faee6a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/57968acc142d40488916b94031faee6a 2024-12-17T12:37:50,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/57968acc142d40488916b94031faee6a, entries=150, sequenceid=127, filesize=11.7 K 2024-12-17T12:37:50,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/939be3926f9842c0845cb6ae7b0f734a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/939be3926f9842c0845cb6ae7b0f734a 2024-12-17T12:37:50,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/939be3926f9842c0845cb6ae7b0f734a, entries=150, sequenceid=127, filesize=11.7 K 2024-12-17T12:37:50,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 502e77060db097ea5decbe44e66ef8e7 in 964ms, sequenceid=127, compaction requested=false 2024-12-17T12:37:50,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:50,268 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-17T12:37:50,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:50,269 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-17T12:37:50,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:50,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:50,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:50,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:50,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:50,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:50,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/c06293551ffb4d82b30c438843da348c is 50, key is test_row_0/A:col10/1734439069231/Put/seqid=0 2024-12-17T12:37:50,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741866_1042 (size=12151) 2024-12-17T12:37:50,290 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/c06293551ffb4d82b30c438843da348c 2024-12-17T12:37:50,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/ca0e8ee18bf3402394254cd37b42e01b is 50, key is test_row_0/B:col10/1734439069231/Put/seqid=0 2024-12-17T12:37:50,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741867_1043 (size=12151) 2024-12-17T12:37:50,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:50,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
as already flushing 2024-12-17T12:37:50,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:50,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:50,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439130367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439130367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:50,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439130368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:50,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439130369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:50,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439130369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:50,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439130471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:50,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439130472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439130472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:50,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-17T12:37:50,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439130674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:50,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439130674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:50,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439130675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:50,722 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/ca0e8ee18bf3402394254cd37b42e01b 2024-12-17T12:37:50,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/431e4ec352cf40c9806938a5bfd49b2b is 50, key is test_row_0/C:col10/1734439069231/Put/seqid=0 2024-12-17T12:37:50,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741868_1044 (size=12151) 2024-12-17T12:37:50,758 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/431e4ec352cf40c9806938a5bfd49b2b 2024-12-17T12:37:50,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/c06293551ffb4d82b30c438843da348c as 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/c06293551ffb4d82b30c438843da348c 2024-12-17T12:37:50,775 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/c06293551ffb4d82b30c438843da348c, entries=150, sequenceid=155, filesize=11.9 K 2024-12-17T12:37:50,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/ca0e8ee18bf3402394254cd37b42e01b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ca0e8ee18bf3402394254cd37b42e01b 2024-12-17T12:37:50,788 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ca0e8ee18bf3402394254cd37b42e01b, entries=150, sequenceid=155, filesize=11.9 K 2024-12-17T12:37:50,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/431e4ec352cf40c9806938a5bfd49b2b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/431e4ec352cf40c9806938a5bfd49b2b 2024-12-17T12:37:50,802 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/431e4ec352cf40c9806938a5bfd49b2b, entries=150, sequenceid=155, filesize=11.9 K 2024-12-17T12:37:50,803 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 502e77060db097ea5decbe44e66ef8e7 in 534ms, sequenceid=155, compaction requested=true 2024-12-17T12:37:50,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:50,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:50,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-17T12:37:50,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-17T12:37:50,809 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-17T12:37:50,809 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2350 sec 2024-12-17T12:37:50,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 2.2460 sec 2024-12-17T12:37:50,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:50,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-17T12:37:50,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:50,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:50,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:50,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:50,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:50,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:50,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/44d8695b1d124c17a9327487ca20bf0e is 50, key is test_row_0/A:col10/1734439070979/Put/seqid=0 2024-12-17T12:37:51,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741869_1045 (size=14541) 2024-12-17T12:37:51,010 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/44d8695b1d124c17a9327487ca20bf0e 2024-12-17T12:37:51,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439131019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439131021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439131022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/ffed733ce8cf4f14851563a97cdf5aab is 50, key is test_row_0/B:col10/1734439070979/Put/seqid=0 2024-12-17T12:37:51,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741870_1046 (size=12151) 2024-12-17T12:37:51,050 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/ffed733ce8cf4f14851563a97cdf5aab 2024-12-17T12:37:51,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/ef47535811454a9abd7051e622515da0 is 50, key is test_row_0/C:col10/1734439070979/Put/seqid=0 2024-12-17T12:37:51,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741871_1047 (size=12151) 2024-12-17T12:37:51,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/ef47535811454a9abd7051e622515da0 2024-12-17T12:37:51,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/44d8695b1d124c17a9327487ca20bf0e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/44d8695b1d124c17a9327487ca20bf0e 2024-12-17T12:37:51,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/44d8695b1d124c17a9327487ca20bf0e, entries=200, sequenceid=168, filesize=14.2 K 2024-12-17T12:37:51,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/ffed733ce8cf4f14851563a97cdf5aab as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ffed733ce8cf4f14851563a97cdf5aab 2024-12-17T12:37:51,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ffed733ce8cf4f14851563a97cdf5aab, entries=150, sequenceid=168, filesize=11.9 K 2024-12-17T12:37:51,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/ef47535811454a9abd7051e622515da0 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ef47535811454a9abd7051e622515da0 2024-12-17T12:37:51,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ef47535811454a9abd7051e622515da0, entries=150, sequenceid=168, filesize=11.9 K 2024-12-17T12:37:51,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 502e77060db097ea5decbe44e66ef8e7 in 134ms, sequenceid=168, compaction requested=true 2024-12-17T12:37:51,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:51,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:37:51,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:51,115 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:37:51,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:B, priority=-2147483648, current under compaction store 
size is 2 2024-12-17T12:37:51,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:51,115 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:37:51,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:37:51,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:51,117 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50934 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:37:51,117 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:37:51,118 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/B is initiating minor compaction (all files) 2024-12-17T12:37:51,118 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/A is initiating minor compaction (all files) 2024-12-17T12:37:51,118 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/B in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:51,118 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/A in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:51,118 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/0d8a07a1f3184c388f94dea2cfc8a209, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/57968acc142d40488916b94031faee6a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ca0e8ee18bf3402394254cd37b42e01b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ffed733ce8cf4f14851563a97cdf5aab] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=47.4 K 2024-12-17T12:37:51,118 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5758a35fd363413f9cbef2f0e4acbe8b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/90999d8e166d4e41b5cedbd8bc188ca0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/c06293551ffb4d82b30c438843da348c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/44d8695b1d124c17a9327487ca20bf0e] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=49.7 K 2024-12-17T12:37:51,119 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d8a07a1f3184c388f94dea2cfc8a209, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734439067918 2024-12-17T12:37:51,119 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5758a35fd363413f9cbef2f0e4acbe8b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734439067918 2024-12-17T12:37:51,120 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90999d8e166d4e41b5cedbd8bc188ca0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439068565 2024-12-17T12:37:51,120 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 57968acc142d40488916b94031faee6a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439068565 2024-12-17T12:37:51,121 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting c06293551ffb4d82b30c438843da348c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734439069231 2024-12-17T12:37:51,121 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 
ca0e8ee18bf3402394254cd37b42e01b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734439069231 2024-12-17T12:37:51,121 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44d8695b1d124c17a9327487ca20bf0e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439070365 2024-12-17T12:37:51,122 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting ffed733ce8cf4f14851563a97cdf5aab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439070365 2024-12-17T12:37:51,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-17T12:37:51,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:51,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:51,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:51,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:51,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:51,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:51,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:51,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/33d8004ba8754affbfae3628602aa3b0 is 50, key is test_row_0/A:col10/1734439071018/Put/seqid=0 2024-12-17T12:37:51,145 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#B#compaction#34 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:51,146 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/8886c8ecf5194894b4a5310b953da21f is 50, key is test_row_0/B:col10/1734439070979/Put/seqid=0 2024-12-17T12:37:51,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439131169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439131170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,172 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#A#compaction#35 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:51,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439131170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,173 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/4af00000a5d24b40b78e81303f3f57f9 is 50, key is test_row_0/A:col10/1734439070979/Put/seqid=0 2024-12-17T12:37:51,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741873_1049 (size=12527) 2024-12-17T12:37:51,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741872_1048 (size=14541) 2024-12-17T12:37:51,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741874_1050 (size=12527) 2024-12-17T12:37:51,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439131272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439131272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439131273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439131375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439131380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439131475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439131476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439131476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/33d8004ba8754affbfae3628602aa3b0 2024-12-17T12:37:51,586 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/8886c8ecf5194894b4a5310b953da21f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8886c8ecf5194894b4a5310b953da21f 2024-12-17T12:37:51,593 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/4af00000a5d24b40b78e81303f3f57f9 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/4af00000a5d24b40b78e81303f3f57f9 2024-12-17T12:37:51,596 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/B of 502e77060db097ea5decbe44e66ef8e7 into 8886c8ecf5194894b4a5310b953da21f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:51,596 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:51,596 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/B, priority=12, startTime=1734439071115; duration=0sec 2024-12-17T12:37:51,596 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:51,597 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:B 2024-12-17T12:37:51,597 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:37:51,601 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:37:51,601 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/C is initiating minor compaction (all files) 2024-12-17T12:37:51,601 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/C in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:51,602 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/44481a16d4f64e3f8d5452eaa8098926, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/939be3926f9842c0845cb6ae7b0f734a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/431e4ec352cf40c9806938a5bfd49b2b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ef47535811454a9abd7051e622515da0] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=47.4 K 2024-12-17T12:37:51,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/7d34b52045204eb6ac14446483f1a6d7 is 50, key is test_row_0/B:col10/1734439071018/Put/seqid=0 2024-12-17T12:37:51,604 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/A of 502e77060db097ea5decbe44e66ef8e7 into 4af00000a5d24b40b78e81303f3f57f9(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:51,604 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:51,604 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/A, priority=12, startTime=1734439071115; duration=0sec 2024-12-17T12:37:51,604 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:51,604 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:A 2024-12-17T12:37:51,605 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 44481a16d4f64e3f8d5452eaa8098926, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734439067918 2024-12-17T12:37:51,606 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 939be3926f9842c0845cb6ae7b0f734a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439068565 2024-12-17T12:37:51,607 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 431e4ec352cf40c9806938a5bfd49b2b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734439069231 2024-12-17T12:37:51,608 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting ef47535811454a9abd7051e622515da0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439070365 2024-12-17T12:37:51,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741875_1051 (size=12151) 2024-12-17T12:37:51,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/7d34b52045204eb6ac14446483f1a6d7 2024-12-17T12:37:51,650 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#C#compaction#37 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:51,651 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/6032712651c6482a865f23372ae6b805 is 50, key is test_row_0/C:col10/1734439070979/Put/seqid=0 2024-12-17T12:37:51,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/39874262d1564854b1e1e2c47b55326e is 50, key is test_row_0/C:col10/1734439071018/Put/seqid=0 2024-12-17T12:37:51,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741877_1053 (size=12151) 2024-12-17T12:37:51,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/39874262d1564854b1e1e2c47b55326e 2024-12-17T12:37:51,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741876_1052 (size=12527) 2024-12-17T12:37:51,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/33d8004ba8754affbfae3628602aa3b0 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/33d8004ba8754affbfae3628602aa3b0 2024-12-17T12:37:51,678 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/6032712651c6482a865f23372ae6b805 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/6032712651c6482a865f23372ae6b805 2024-12-17T12:37:51,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/33d8004ba8754affbfae3628602aa3b0, entries=200, sequenceid=193, filesize=14.2 K 2024-12-17T12:37:51,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/7d34b52045204eb6ac14446483f1a6d7 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/7d34b52045204eb6ac14446483f1a6d7 2024-12-17T12:37:51,688 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/C of 502e77060db097ea5decbe44e66ef8e7 into 6032712651c6482a865f23372ae6b805(size=12.2 K), 
total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:37:51,688 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:51,688 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/C, priority=12, startTime=1734439071115; duration=0sec 2024-12-17T12:37:51,688 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:51,688 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:C 2024-12-17T12:37:51,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/7d34b52045204eb6ac14446483f1a6d7, entries=150, sequenceid=193, filesize=11.9 K 2024-12-17T12:37:51,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/39874262d1564854b1e1e2c47b55326e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/39874262d1564854b1e1e2c47b55326e 2024-12-17T12:37:51,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/39874262d1564854b1e1e2c47b55326e, entries=150, sequenceid=193, filesize=11.9 K 2024-12-17T12:37:51,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 502e77060db097ea5decbe44e66ef8e7 in 585ms, sequenceid=193, compaction requested=false 2024-12-17T12:37:51,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:51,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:51,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-17T12:37:51,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:51,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:51,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:51,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:51,785 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:51,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:51,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/b560ecc34ba1453b8d095aa2363b8b48 is 50, key is test_row_0/A:col10/1734439071781/Put/seqid=0 2024-12-17T12:37:51,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741878_1054 (size=14541) 2024-12-17T12:37:51,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/b560ecc34ba1453b8d095aa2363b8b48 2024-12-17T12:37:51,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/d5b28b2981cb4dbf9433dbcca3fa3108 is 50, key is test_row_0/B:col10/1734439071781/Put/seqid=0 2024-12-17T12:37:51,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741879_1055 (size=12151) 2024-12-17T12:37:51,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439131842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439131842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439131842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439131946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439131946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:51,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:51,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439131951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439132149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439132156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439132157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,234 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/d5b28b2981cb4dbf9433dbcca3fa3108 2024-12-17T12:37:52,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/4dd6bbbaf4f046cdac9e64044a8351b8 is 50, key is test_row_0/C:col10/1734439071781/Put/seqid=0 2024-12-17T12:37:52,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741880_1056 (size=12151) 2024-12-17T12:37:52,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/4dd6bbbaf4f046cdac9e64044a8351b8 2024-12-17T12:37:52,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/b560ecc34ba1453b8d095aa2363b8b48 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/b560ecc34ba1453b8d095aa2363b8b48 2024-12-17T12:37:52,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/b560ecc34ba1453b8d095aa2363b8b48, entries=200, sequenceid=211, filesize=14.2 K 2024-12-17T12:37:52,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/d5b28b2981cb4dbf9433dbcca3fa3108 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d5b28b2981cb4dbf9433dbcca3fa3108 2024-12-17T12:37:52,402 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d5b28b2981cb4dbf9433dbcca3fa3108, entries=150, sequenceid=211, filesize=11.9 K 2024-12-17T12:37:52,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/4dd6bbbaf4f046cdac9e64044a8351b8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/4dd6bbbaf4f046cdac9e64044a8351b8 2024-12-17T12:37:52,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/4dd6bbbaf4f046cdac9e64044a8351b8, entries=150, sequenceid=211, filesize=11.9 K 2024-12-17T12:37:52,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 502e77060db097ea5decbe44e66ef8e7 in 632ms, sequenceid=211, compaction requested=true 2024-12-17T12:37:52,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:52,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:37:52,415 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:52,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:52,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:37:52,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:52,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:37:52,415 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:52,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:52,417 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:52,417 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41609 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:52,417 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/A is initiating minor compaction (all files) 2024-12-17T12:37:52,417 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/B is initiating minor compaction (all files) 2024-12-17T12:37:52,417 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/B in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:52,417 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/A in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:52,417 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8886c8ecf5194894b4a5310b953da21f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/7d34b52045204eb6ac14446483f1a6d7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d5b28b2981cb4dbf9433dbcca3fa3108] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.0 K 2024-12-17T12:37:52,417 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/4af00000a5d24b40b78e81303f3f57f9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/33d8004ba8754affbfae3628602aa3b0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/b560ecc34ba1453b8d095aa2363b8b48] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=40.6 K 2024-12-17T12:37:52,418 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 8886c8ecf5194894b4a5310b953da21f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439070365 2024-12-17T12:37:52,418 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4af00000a5d24b40b78e81303f3f57f9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439070365 2024-12-17T12:37:52,419 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d34b52045204eb6ac14446483f1a6d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734439071018 
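The two "Exploring compaction algorithm has selected 3 files ..." entries above come from the ratio-based candidate search: a group of store files is only compacted together when no single file dwarfs the others. Below is a rough, standalone sketch of that "in ratio" test. It is not the HBase source: the class and method names are invented, and the real ExploringCompactionPolicy additionally enforces min/max file counts and searches windows of candidate files. The sizes used are the three store-A files from the log, which sum to the 41609 bytes reported above.

import java.util.List;

// Simplified illustration only; not the HBase implementation.
public class CompactionRatioSketch {

    // A candidate set is "in ratio" if every file is no larger than
    // ratio * (combined size of the other files in the set).
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // 12527 + 14541 + 14541 = 41609, matching the store-A selection above.
        List<Long> storeA = List.of(12_527L, 14_541L, 14_541L);
        System.out.println("in ratio at 1.2: " + filesInRatio(storeA, 1.2));
    }
}

With the commonly used ratio of 1.2 the set passes, so the policy compacts all three files into one, which is what the subsequent "Completed compaction of 3 (all) file(s)" entries report.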
2024-12-17T12:37:52,419 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33d8004ba8754affbfae3628602aa3b0, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734439071008 2024-12-17T12:37:52,419 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d5b28b2981cb4dbf9433dbcca3fa3108, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734439071138 2024-12-17T12:37:52,420 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting b560ecc34ba1453b8d095aa2363b8b48, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734439071138 2024-12-17T12:37:52,433 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#B#compaction#42 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:52,434 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/98d15c66e83643099e52749ac42a1fe4 is 50, key is test_row_0/B:col10/1734439071781/Put/seqid=0 2024-12-17T12:37:52,436 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#A#compaction#43 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:52,437 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/cfeef11b96964351b48aaface1b1cb83 is 50, key is test_row_0/A:col10/1734439071781/Put/seqid=0 2024-12-17T12:37:52,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741882_1058 (size=12629) 2024-12-17T12:37:52,457 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:37:52,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:52,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:52,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:52,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:52,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:52,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:52,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:52,459 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/cfeef11b96964351b48aaface1b1cb83 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cfeef11b96964351b48aaface1b1cb83 2024-12-17T12:37:52,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741881_1057 (size=12629) 2024-12-17T12:37:52,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/dbe694f38fe94e96aa216066177221e3 is 50, key is test_row_0/A:col10/1734439071810/Put/seqid=0 2024-12-17T12:37:52,476 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/A of 502e77060db097ea5decbe44e66ef8e7 into cfeef11b96964351b48aaface1b1cb83(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:37:52,476 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:52,476 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/A, priority=13, startTime=1734439072415; duration=0sec 2024-12-17T12:37:52,477 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:52,477 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:A 2024-12-17T12:37:52,477 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:52,480 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:52,480 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/C is initiating minor compaction (all files) 2024-12-17T12:37:52,480 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/C in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
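The RegionTooBusyException storms on either side of these compaction entries show the region's blocking backpressure at work: once the region's memstore passes its blocking size (512 K in this test, presumably the configured flush size times hbase.hregion.memstore.block.multiplier), every Mutate call is rejected until a flush brings the size back down, and the clients simply retry; the callIds above advance roughly every 100-200 ms until the writes get through. A minimal sketch of a writer that backs off on this rejection is shown below. It assumes the table and column layout of the test (TestAcidGuarantees, family A, row test_row_0) and arbitrary backoff numbers; note that the stock HBase client normally retries such failures internally, so depending on retry settings the exception may instead surface wrapped in a RetriesExhaustedException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                    // start small, double on each rejection
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    break;                           // write accepted
                } catch (RegionTooBusyException e) { // memstore above its blocking limit
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}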
2024-12-17T12:37:52,480 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/6032712651c6482a865f23372ae6b805, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/39874262d1564854b1e1e2c47b55326e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/4dd6bbbaf4f046cdac9e64044a8351b8] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.0 K 2024-12-17T12:37:52,481 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6032712651c6482a865f23372ae6b805, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439070365 2024-12-17T12:37:52,482 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/98d15c66e83643099e52749ac42a1fe4 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/98d15c66e83643099e52749ac42a1fe4 2024-12-17T12:37:52,482 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39874262d1564854b1e1e2c47b55326e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734439071018 2024-12-17T12:37:52,483 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4dd6bbbaf4f046cdac9e64044a8351b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734439071138 2024-12-17T12:37:52,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439132479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439132484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741883_1059 (size=12151) 2024-12-17T12:37:52,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439132485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/dbe694f38fe94e96aa216066177221e3 2024-12-17T12:37:52,496 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/B of 502e77060db097ea5decbe44e66ef8e7 into 98d15c66e83643099e52749ac42a1fe4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:37:52,497 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:52,497 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/B, priority=13, startTime=1734439072415; duration=0sec 2024-12-17T12:37:52,497 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:52,497 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:B 2024-12-17T12:37:52,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/8d888a85fe904338af070b42ccf1d080 is 50, key is test_row_0/B:col10/1734439071810/Put/seqid=0 2024-12-17T12:37:52,509 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#C#compaction#46 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:52,511 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/bebc2ec80fa9459da84143db7497e549 is 50, key is test_row_0/C:col10/1734439071781/Put/seqid=0 2024-12-17T12:37:52,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741884_1060 (size=12151) 2024-12-17T12:37:52,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741885_1061 (size=12629) 2024-12-17T12:37:52,539 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/bebc2ec80fa9459da84143db7497e549 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/bebc2ec80fa9459da84143db7497e549 2024-12-17T12:37:52,548 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/C of 502e77060db097ea5decbe44e66ef8e7 into bebc2ec80fa9459da84143db7497e549(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:37:52,548 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:52,548 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/C, priority=13, startTime=1734439072415; duration=0sec 2024-12-17T12:37:52,548 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:52,549 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:C 2024-12-17T12:37:52,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439132587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439132589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439132592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-17T12:37:52,676 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-17T12:37:52,678 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:37:52,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-17T12:37:52,681 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:37:52,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-17T12:37:52,682 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:37:52,683 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:37:52,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-17T12:37:52,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439132790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439132792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439132795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,836 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-17T12:37:52,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:52,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:52,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:52,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:52,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:52,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:52,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/8d888a85fe904338af070b42ccf1d080 2024-12-17T12:37:52,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/31229da7e34045ee9614d75bd6625da6 is 50, key is test_row_0/C:col10/1734439071810/Put/seqid=0 2024-12-17T12:37:52,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741886_1062 (size=12151) 2024-12-17T12:37:52,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-17T12:37:52,990 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:52,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-17T12:37:52,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:52,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
as already flushing 2024-12-17T12:37:52,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:52,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:52,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:52,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439133094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439133095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439133100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,143 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-17T12:37:53,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:53,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:53,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:53,146 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:53,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-17T12:37:53,299 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-17T12:37:53,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:53,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:53,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:53,300 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:53,334 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/31229da7e34045ee9614d75bd6625da6 2024-12-17T12:37:53,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/dbe694f38fe94e96aa216066177221e3 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dbe694f38fe94e96aa216066177221e3 2024-12-17T12:37:53,363 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dbe694f38fe94e96aa216066177221e3, entries=150, sequenceid=234, filesize=11.9 K 2024-12-17T12:37:53,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/8d888a85fe904338af070b42ccf1d080 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8d888a85fe904338af070b42ccf1d080 2024-12-17T12:37:53,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8d888a85fe904338af070b42ccf1d080, entries=150, sequenceid=234, filesize=11.9 K 2024-12-17T12:37:53,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/31229da7e34045ee9614d75bd6625da6 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/31229da7e34045ee9614d75bd6625da6 2024-12-17T12:37:53,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/31229da7e34045ee9614d75bd6625da6, entries=150, sequenceid=234, filesize=11.9 K 2024-12-17T12:37:53,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 502e77060db097ea5decbe44e66ef8e7 in 926ms, sequenceid=234, compaction requested=false 2024-12-17T12:37:53,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:53,387 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 
2024-12-17T12:37:53,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:53,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:53,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:53,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:53,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:53,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:53,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/ef0b1bb1c7184e8290ead489a09c132d is 50, key is test_row_0/A:col10/1734439073385/Put/seqid=0 2024-12-17T12:37:53,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741887_1063 (size=14541) 2024-12-17T12:37:53,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/ef0b1bb1c7184e8290ead489a09c132d 2024-12-17T12:37:53,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/9aaa5d09c17e46049befe677e4fac467 is 50, key is test_row_0/B:col10/1734439073385/Put/seqid=0 2024-12-17T12:37:53,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741888_1064 (size=12151) 2024-12-17T12:37:53,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439133422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439133425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,453 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-17T12:37:53,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:53,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:53,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:53,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:53,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439133526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439133542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439133600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439133602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439133603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,607 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-17T12:37:53,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:53,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:53,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:53,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439133728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:53,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439133746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,760 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-17T12:37:53,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:53,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:53,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:53,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:53,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-17T12:37:53,824 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/9aaa5d09c17e46049befe677e4fac467 2024-12-17T12:37:53,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/df5651d7cda04e6db7bcf471ff98e73b is 50, key is test_row_0/C:col10/1734439073385/Put/seqid=0 2024-12-17T12:37:53,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741889_1065 (size=12151) 2024-12-17T12:37:53,913 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:53,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-17T12:37:53,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:53,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:53,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:53,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:53,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:54,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:54,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439134031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:54,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439134050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,066 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-17T12:37:54,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:54,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:54,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:54,067 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:54,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:54,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:54,219 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-17T12:37:54,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:54,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:54,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:54,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:54,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:54,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:54,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/df5651d7cda04e6db7bcf471ff98e73b 2024-12-17T12:37:54,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/ef0b1bb1c7184e8290ead489a09c132d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ef0b1bb1c7184e8290ead489a09c132d 2024-12-17T12:37:54,272 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ef0b1bb1c7184e8290ead489a09c132d, entries=200, sequenceid=251, filesize=14.2 K 2024-12-17T12:37:54,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/9aaa5d09c17e46049befe677e4fac467 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/9aaa5d09c17e46049befe677e4fac467 2024-12-17T12:37:54,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/9aaa5d09c17e46049befe677e4fac467, entries=150, sequenceid=251, filesize=11.9 K 2024-12-17T12:37:54,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/df5651d7cda04e6db7bcf471ff98e73b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/df5651d7cda04e6db7bcf471ff98e73b 2024-12-17T12:37:54,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/df5651d7cda04e6db7bcf471ff98e73b, entries=150, sequenceid=251, filesize=11.9 K 2024-12-17T12:37:54,291 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 502e77060db097ea5decbe44e66ef8e7 in 904ms, sequenceid=251, compaction requested=true 2024-12-17T12:37:54,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:54,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:37:54,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:54,291 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:54,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:37:54,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:54,291 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:54,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:37:54,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:54,292 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39321 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:54,293 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/A is initiating minor compaction (all files) 2024-12-17T12:37:54,293 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/A in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:54,293 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cfeef11b96964351b48aaface1b1cb83, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dbe694f38fe94e96aa216066177221e3, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ef0b1bb1c7184e8290ead489a09c132d] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=38.4 K 2024-12-17T12:37:54,293 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfeef11b96964351b48aaface1b1cb83, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734439071138 2024-12-17T12:37:54,294 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:54,294 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/B is initiating minor compaction (all files) 2024-12-17T12:37:54,294 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/B in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:54,294 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/98d15c66e83643099e52749ac42a1fe4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8d888a85fe904338af070b42ccf1d080, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/9aaa5d09c17e46049befe677e4fac467] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.1 K 2024-12-17T12:37:54,294 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbe694f38fe94e96aa216066177221e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734439071810 2024-12-17T12:37:54,294 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 98d15c66e83643099e52749ac42a1fe4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734439071138 2024-12-17T12:37:54,295 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef0b1bb1c7184e8290ead489a09c132d, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439072471 2024-12-17T12:37:54,295 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d888a85fe904338af070b42ccf1d080, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734439071810 2024-12-17T12:37:54,295 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 9aaa5d09c17e46049befe677e4fac467, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439072471 2024-12-17T12:37:54,305 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#B#compaction#52 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:54,305 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#A#compaction#51 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:54,306 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/2e014e5ada034cb7a1090d0f1f56cf50 is 50, key is test_row_0/B:col10/1734439073385/Put/seqid=0 2024-12-17T12:37:54,307 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/8106759b128a4083a55da4c1663517bb is 50, key is test_row_0/A:col10/1734439073385/Put/seqid=0 2024-12-17T12:37:54,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741890_1066 (size=12731) 2024-12-17T12:37:54,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741891_1067 (size=12731) 2024-12-17T12:37:54,372 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-17T12:37:54,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:54,373 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:37:54,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:54,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:54,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:54,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:54,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:54,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:54,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/ec4b1d0e21704903b3da83654f26d2d9 is 50, key is test_row_0/A:col10/1734439073417/Put/seqid=0 2024-12-17T12:37:54,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741892_1068 (size=12301) 2024-12-17T12:37:54,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:54,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:54,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:54,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439134558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:54,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439134559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:54,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439134602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:54,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439134608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:54,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439134613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:54,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439134661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:54,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439134661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,727 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/2e014e5ada034cb7a1090d0f1f56cf50 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/2e014e5ada034cb7a1090d0f1f56cf50 2024-12-17T12:37:54,734 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/B of 502e77060db097ea5decbe44e66ef8e7 into 2e014e5ada034cb7a1090d0f1f56cf50(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:54,734 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:54,734 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/B, priority=13, startTime=1734439074291; duration=0sec 2024-12-17T12:37:54,735 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:54,735 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:B 2024-12-17T12:37:54,735 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:54,736 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:54,736 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/C is initiating minor compaction (all files) 2024-12-17T12:37:54,737 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/C in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:54,737 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/bebc2ec80fa9459da84143db7497e549, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/31229da7e34045ee9614d75bd6625da6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/df5651d7cda04e6db7bcf471ff98e73b] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.1 K 2024-12-17T12:37:54,737 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting bebc2ec80fa9459da84143db7497e549, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734439071138 2024-12-17T12:37:54,737 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/8106759b128a4083a55da4c1663517bb as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/8106759b128a4083a55da4c1663517bb 2024-12-17T12:37:54,738 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 31229da7e34045ee9614d75bd6625da6, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734439071810 2024-12-17T12:37:54,739 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting df5651d7cda04e6db7bcf471ff98e73b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439072471 2024-12-17T12:37:54,744 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/A of 502e77060db097ea5decbe44e66ef8e7 into 8106759b128a4083a55da4c1663517bb(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:37:54,744 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:54,744 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/A, priority=13, startTime=1734439074291; duration=0sec 2024-12-17T12:37:54,745 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:54,745 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:A 2024-12-17T12:37:54,748 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#C#compaction#54 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:54,750 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/c07fd6ce56df4383b946be742327fdaf is 50, key is test_row_0/C:col10/1734439073385/Put/seqid=0 2024-12-17T12:37:54,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741893_1069 (size=12731) 2024-12-17T12:37:54,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-17T12:37:54,788 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/ec4b1d0e21704903b3da83654f26d2d9 2024-12-17T12:37:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/011610c4a8c74897a47058143249e8a4 is 50, key is test_row_0/B:col10/1734439073417/Put/seqid=0 2024-12-17T12:37:54,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741894_1070 (size=12301) 2024-12-17T12:37:54,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:54,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439134865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:54,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:54,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439134865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:55,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:55,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439135166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:55,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:55,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439135168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:55,171 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/c07fd6ce56df4383b946be742327fdaf as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/c07fd6ce56df4383b946be742327fdaf 2024-12-17T12:37:55,176 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/C of 502e77060db097ea5decbe44e66ef8e7 into c07fd6ce56df4383b946be742327fdaf(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:55,176 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:55,176 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/C, priority=13, startTime=1734439074291; duration=0sec 2024-12-17T12:37:55,176 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:55,177 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:C 2024-12-17T12:37:55,201 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/011610c4a8c74897a47058143249e8a4 2024-12-17T12:37:55,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/e513e291c287428eb3b7ee23863d60f6 is 50, key is test_row_0/C:col10/1734439073417/Put/seqid=0 2024-12-17T12:37:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741895_1071 (size=12301) 2024-12-17T12:37:55,218 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/e513e291c287428eb3b7ee23863d60f6 2024-12-17T12:37:55,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/ec4b1d0e21704903b3da83654f26d2d9 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ec4b1d0e21704903b3da83654f26d2d9 2024-12-17T12:37:55,233 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ec4b1d0e21704903b3da83654f26d2d9, entries=150, sequenceid=274, filesize=12.0 K 2024-12-17T12:37:55,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/011610c4a8c74897a47058143249e8a4 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/011610c4a8c74897a47058143249e8a4 2024-12-17T12:37:55,240 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/011610c4a8c74897a47058143249e8a4, entries=150, sequenceid=274, filesize=12.0 K 2024-12-17T12:37:55,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/e513e291c287428eb3b7ee23863d60f6 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/e513e291c287428eb3b7ee23863d60f6 2024-12-17T12:37:55,252 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/e513e291c287428eb3b7ee23863d60f6, entries=150, sequenceid=274, filesize=12.0 K 2024-12-17T12:37:55,253 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 502e77060db097ea5decbe44e66ef8e7 in 879ms, sequenceid=274, compaction requested=false 2024-12-17T12:37:55,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:55,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:55,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-17T12:37:55,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-17T12:37:55,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-17T12:37:55,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5710 sec 2024-12-17T12:37:55,260 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.5800 sec 2024-12-17T12:37:55,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-17T12:37:55,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:55,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:55,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:55,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:55,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:55,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:55,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:55,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/a39acdd236a04cb5905c702a310c215c is 50, key is test_row_0/A:col10/1734439075672/Put/seqid=0 2024-12-17T12:37:55,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741896_1072 (size=12301) 2024-12-17T12:37:55,691 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/a39acdd236a04cb5905c702a310c215c 2024-12-17T12:37:55,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/24b6425720574c15a3e42019508d6276 is 50, key is test_row_0/B:col10/1734439075672/Put/seqid=0 2024-12-17T12:37:55,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:55,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439135742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:55,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:55,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439135742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:55,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741897_1073 (size=12301) 2024-12-17T12:37:55,753 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/24b6425720574c15a3e42019508d6276 2024-12-17T12:37:55,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7b8b8d6ce5e44fa3912903cdedbff12a is 50, key is test_row_0/C:col10/1734439075672/Put/seqid=0 2024-12-17T12:37:55,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741898_1074 (size=12301) 2024-12-17T12:37:55,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7b8b8d6ce5e44fa3912903cdedbff12a 2024-12-17T12:37:55,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/a39acdd236a04cb5905c702a310c215c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/a39acdd236a04cb5905c702a310c215c 2024-12-17T12:37:55,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/a39acdd236a04cb5905c702a310c215c, entries=150, sequenceid=292, filesize=12.0 K 2024-12-17T12:37:55,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/24b6425720574c15a3e42019508d6276 as 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/24b6425720574c15a3e42019508d6276 2024-12-17T12:37:55,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/24b6425720574c15a3e42019508d6276, entries=150, sequenceid=292, filesize=12.0 K 2024-12-17T12:37:55,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7b8b8d6ce5e44fa3912903cdedbff12a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7b8b8d6ce5e44fa3912903cdedbff12a 2024-12-17T12:37:55,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7b8b8d6ce5e44fa3912903cdedbff12a, entries=150, sequenceid=292, filesize=12.0 K 2024-12-17T12:37:55,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 502e77060db097ea5decbe44e66ef8e7 in 135ms, sequenceid=292, compaction requested=true 2024-12-17T12:37:55,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:55,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:37:55,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:55,808 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:55,808 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:55,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:37:55,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:55,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:37:55,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:55,810 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:55,810 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/B is initiating minor compaction (all files) 2024-12-17T12:37:55,811 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/B in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:55,811 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/2e014e5ada034cb7a1090d0f1f56cf50, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/011610c4a8c74897a47058143249e8a4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/24b6425720574c15a3e42019508d6276] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.5 K 2024-12-17T12:37:55,812 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e014e5ada034cb7a1090d0f1f56cf50, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439072471 2024-12-17T12:37:55,813 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:55,813 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/A is initiating minor compaction (all files) 2024-12-17T12:37:55,813 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/A in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:55,813 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/8106759b128a4083a55da4c1663517bb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ec4b1d0e21704903b3da83654f26d2d9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/a39acdd236a04cb5905c702a310c215c] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.5 K 2024-12-17T12:37:55,814 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 011610c4a8c74897a47058143249e8a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734439073417 2024-12-17T12:37:55,814 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8106759b128a4083a55da4c1663517bb, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439072471 2024-12-17T12:37:55,815 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 24b6425720574c15a3e42019508d6276, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734439074558 2024-12-17T12:37:55,815 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec4b1d0e21704903b3da83654f26d2d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734439073417 2024-12-17T12:37:55,815 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting a39acdd236a04cb5905c702a310c215c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734439074558 2024-12-17T12:37:55,825 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#A#compaction#60 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:55,826 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/4af037d188164a58a11e75238e4d2508 is 50, key is test_row_0/A:col10/1734439075672/Put/seqid=0 2024-12-17T12:37:55,828 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#B#compaction#61 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:55,828 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/c02abcacd306427d8fb14470406432f9 is 50, key is test_row_0/B:col10/1734439075672/Put/seqid=0 2024-12-17T12:37:55,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:55,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-17T12:37:55,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:55,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:55,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:55,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:55,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:55,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:55,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741900_1076 (size=12983) 2024-12-17T12:37:55,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/7f4d016efb9f4683980fd22a938634cd is 50, key is test_row_0/A:col10/1734439075725/Put/seqid=0 2024-12-17T12:37:55,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741899_1075 (size=12983) 2024-12-17T12:37:55,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:55,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:55,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439135873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:55,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439135871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:55,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741901_1077 (size=12301) 2024-12-17T12:37:55,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:55,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439135976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:55,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:55,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439135976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:56,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439136178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:56,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439136181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,271 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/c02abcacd306427d8fb14470406432f9 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/c02abcacd306427d8fb14470406432f9 2024-12-17T12:37:56,278 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/4af037d188164a58a11e75238e4d2508 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/4af037d188164a58a11e75238e4d2508 2024-12-17T12:37:56,280 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/B of 502e77060db097ea5decbe44e66ef8e7 into c02abcacd306427d8fb14470406432f9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:56,280 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:56,280 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/B, priority=13, startTime=1734439075808; duration=0sec 2024-12-17T12:37:56,280 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:56,280 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:B 2024-12-17T12:37:56,280 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:56,281 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:56,281 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/C is initiating minor compaction (all files) 2024-12-17T12:37:56,281 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/C in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:56,282 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/c07fd6ce56df4383b946be742327fdaf, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/e513e291c287428eb3b7ee23863d60f6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7b8b8d6ce5e44fa3912903cdedbff12a] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.5 K 2024-12-17T12:37:56,283 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting c07fd6ce56df4383b946be742327fdaf, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439072471 2024-12-17T12:37:56,284 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting e513e291c287428eb3b7ee23863d60f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734439073417 2024-12-17T12:37:56,285 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b8b8d6ce5e44fa3912903cdedbff12a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734439074558 2024-12-17T12:37:56,289 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 502e77060db097ea5decbe44e66ef8e7/A of 502e77060db097ea5decbe44e66ef8e7 into 4af037d188164a58a11e75238e4d2508(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:37:56,289 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:56,289 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/A, priority=13, startTime=1734439075808; duration=0sec 2024-12-17T12:37:56,291 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:56,291 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:A 2024-12-17T12:37:56,295 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/7f4d016efb9f4683980fd22a938634cd 2024-12-17T12:37:56,300 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#C#compaction#63 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:56,301 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/3a851fb53cb14a8a99912a35997e7775 is 50, key is test_row_0/C:col10/1734439075672/Put/seqid=0 2024-12-17T12:37:56,309 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/8a2dfa0ba1b0413fb460e0bf8669c1e1 is 50, key is test_row_0/B:col10/1734439075725/Put/seqid=0 2024-12-17T12:37:56,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741902_1078 (size=12983) 2024-12-17T12:37:56,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741903_1079 (size=12301) 2024-12-17T12:37:56,330 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/3a851fb53cb14a8a99912a35997e7775 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/3a851fb53cb14a8a99912a35997e7775 2024-12-17T12:37:56,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=316 
(bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/8a2dfa0ba1b0413fb460e0bf8669c1e1 2024-12-17T12:37:56,338 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/C of 502e77060db097ea5decbe44e66ef8e7 into 3a851fb53cb14a8a99912a35997e7775(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:37:56,338 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:56,338 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/C, priority=13, startTime=1734439075809; duration=0sec 2024-12-17T12:37:56,338 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:56,339 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:C 2024-12-17T12:37:56,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/cb98ccb882e5415cb08d7e8341d0e608 is 50, key is test_row_0/C:col10/1734439075725/Put/seqid=0 2024-12-17T12:37:56,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741904_1080 (size=12301) 2024-12-17T12:37:56,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/cb98ccb882e5415cb08d7e8341d0e608 2024-12-17T12:37:56,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/7f4d016efb9f4683980fd22a938634cd as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7f4d016efb9f4683980fd22a938634cd 2024-12-17T12:37:56,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7f4d016efb9f4683980fd22a938634cd, entries=150, sequenceid=316, filesize=12.0 K 2024-12-17T12:37:56,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/8a2dfa0ba1b0413fb460e0bf8669c1e1 as 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8a2dfa0ba1b0413fb460e0bf8669c1e1 2024-12-17T12:37:56,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8a2dfa0ba1b0413fb460e0bf8669c1e1, entries=150, sequenceid=316, filesize=12.0 K 2024-12-17T12:37:56,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/cb98ccb882e5415cb08d7e8341d0e608 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/cb98ccb882e5415cb08d7e8341d0e608 2024-12-17T12:37:56,387 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/cb98ccb882e5415cb08d7e8341d0e608, entries=150, sequenceid=316, filesize=12.0 K 2024-12-17T12:37:56,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 502e77060db097ea5decbe44e66ef8e7 in 538ms, sequenceid=316, compaction requested=false 2024-12-17T12:37:56,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:56,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-17T12:37:56,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:56,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:56,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:56,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:56,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:56,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:56,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:56,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/1347f9e6dbfd4ed48aa2717e82c12453 is 50, key is test_row_0/A:col10/1734439075860/Put/seqid=0 2024-12-17T12:37:56,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741905_1081 (size=12301) 2024-12-17T12:37:56,519 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/1347f9e6dbfd4ed48aa2717e82c12453 2024-12-17T12:37:56,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/1a6c175735be45ae879fc2e121d72044 is 50, key is test_row_0/B:col10/1734439075860/Put/seqid=0 2024-12-17T12:37:56,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:56,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439136537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741906_1082 (size=12301) 2024-12-17T12:37:56,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/1a6c175735be45ae879fc2e121d72044 2024-12-17T12:37:56,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:56,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439136540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7ecf3e0f00ac4e15a9df9f2451e8b294 is 50, key is test_row_0/C:col10/1734439075860/Put/seqid=0 2024-12-17T12:37:56,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741907_1083 (size=12301) 2024-12-17T12:37:56,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:56,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439136612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,614 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4130 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., hostname=681c08bfdbdf,36491,1734439058372, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:37:56,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:56,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439136613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,615 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4130 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., hostname=681c08bfdbdf,36491,1734439058372, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:37:56,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:56,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439136629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,631 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., hostname=681c08bfdbdf,36491,1734439058372, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at 
org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:37:56,642 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:56,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439136641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:56,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439136648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-17T12:37:56,788 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-17T12:37:56,789 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:37:56,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-17T12:37:56,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-17T12:37:56,791 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:37:56,792 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:37:56,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:37:56,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:56,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439136843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:56,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439136853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-17T12:37:56,952 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:56,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-17T12:37:56,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:56,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:56,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:56,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:56,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:56,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:56,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7ecf3e0f00ac4e15a9df9f2451e8b294 2024-12-17T12:37:56,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/1347f9e6dbfd4ed48aa2717e82c12453 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/1347f9e6dbfd4ed48aa2717e82c12453 2024-12-17T12:37:56,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/1347f9e6dbfd4ed48aa2717e82c12453, entries=150, sequenceid=332, filesize=12.0 K 2024-12-17T12:37:56,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/1a6c175735be45ae879fc2e121d72044 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/1a6c175735be45ae879fc2e121d72044 2024-12-17T12:37:56,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/1a6c175735be45ae879fc2e121d72044, entries=150, sequenceid=332, filesize=12.0 K 2024-12-17T12:37:56,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7ecf3e0f00ac4e15a9df9f2451e8b294 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7ecf3e0f00ac4e15a9df9f2451e8b294 2024-12-17T12:37:56,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7ecf3e0f00ac4e15a9df9f2451e8b294, entries=150, sequenceid=332, filesize=12.0 K 2024-12-17T12:37:56,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 502e77060db097ea5decbe44e66ef8e7 in 514ms, sequenceid=332, compaction requested=true 2024-12-17T12:37:56,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:56,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
502e77060db097ea5decbe44e66ef8e7:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:37:56,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:56,997 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:56,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:37:56,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:56,997 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:56,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:37:56,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:56,999 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:56,999 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/B is initiating minor compaction (all files) 2024-12-17T12:37:56,999 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/B in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:56,999 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/c02abcacd306427d8fb14470406432f9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8a2dfa0ba1b0413fb460e0bf8669c1e1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/1a6c175735be45ae879fc2e121d72044] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.7 K 2024-12-17T12:37:56,999 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:56,999 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/A is initiating minor compaction (all files) 2024-12-17T12:37:56,999 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/A in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:57,000 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/4af037d188164a58a11e75238e4d2508, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7f4d016efb9f4683980fd22a938634cd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/1347f9e6dbfd4ed48aa2717e82c12453] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.7 K 2024-12-17T12:37:57,000 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4af037d188164a58a11e75238e4d2508, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734439074558 2024-12-17T12:37:57,001 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting c02abcacd306427d8fb14470406432f9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734439074558 2024-12-17T12:37:57,001 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f4d016efb9f4683980fd22a938634cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1734439075725 2024-12-17T12:37:57,001 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a2dfa0ba1b0413fb460e0bf8669c1e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1734439075725 2024-12-17T12:37:57,002 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 1347f9e6dbfd4ed48aa2717e82c12453, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734439075860 2024-12-17T12:37:57,002 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a6c175735be45ae879fc2e121d72044, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734439075860 2024-12-17T12:37:57,013 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#A#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:57,014 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/243ef21cbcba4f46a7ad4ca901bc1a3a is 50, key is test_row_0/A:col10/1734439075860/Put/seqid=0 2024-12-17T12:37:57,020 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#B#compaction#70 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:57,021 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/be3cb812d6304479a80a8cd68f70cdf3 is 50, key is test_row_0/B:col10/1734439075860/Put/seqid=0 2024-12-17T12:37:57,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741908_1084 (size=13085) 2024-12-17T12:37:57,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741909_1085 (size=13085) 2024-12-17T12:37:57,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-17T12:37:57,109 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:57,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-17T12:37:57,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:57,110 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:37:57,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:57,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:57,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:57,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:57,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:57,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:57,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/e7333808be094f16af821cade7e1a086 is 50, key is test_row_0/A:col10/1734439076538/Put/seqid=0 2024-12-17T12:37:57,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741910_1086 (size=12301) 2024-12-17T12:37:57,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:57,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:57,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:57,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439137165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:57,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:57,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439137165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:57,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:57,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439137269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:57,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:57,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439137280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:57,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-17T12:37:57,446 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/243ef21cbcba4f46a7ad4ca901bc1a3a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/243ef21cbcba4f46a7ad4ca901bc1a3a 2024-12-17T12:37:57,454 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/A of 502e77060db097ea5decbe44e66ef8e7 into 243ef21cbcba4f46a7ad4ca901bc1a3a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:57,454 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:57,454 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/A, priority=13, startTime=1734439076997; duration=0sec 2024-12-17T12:37:57,454 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:57,454 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:A 2024-12-17T12:37:57,454 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:57,454 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/be3cb812d6304479a80a8cd68f70cdf3 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/be3cb812d6304479a80a8cd68f70cdf3 2024-12-17T12:37:57,456 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:57,456 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/C is initiating minor compaction (all files) 2024-12-17T12:37:57,456 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/C in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:57,456 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/3a851fb53cb14a8a99912a35997e7775, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/cb98ccb882e5415cb08d7e8341d0e608, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7ecf3e0f00ac4e15a9df9f2451e8b294] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.7 K 2024-12-17T12:37:57,457 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a851fb53cb14a8a99912a35997e7775, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734439074558 2024-12-17T12:37:57,457 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb98ccb882e5415cb08d7e8341d0e608, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1734439075725 2024-12-17T12:37:57,458 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ecf3e0f00ac4e15a9df9f2451e8b294, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734439075860 2024-12-17T12:37:57,461 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/B of 502e77060db097ea5decbe44e66ef8e7 into be3cb812d6304479a80a8cd68f70cdf3(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:37:57,461 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:57,461 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/B, priority=13, startTime=1734439076997; duration=0sec 2024-12-17T12:37:57,461 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:57,461 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:B 2024-12-17T12:37:57,468 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#C#compaction#72 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:57,469 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/45a8685432074ec387fc4b5298f484ab is 50, key is test_row_0/C:col10/1734439075860/Put/seqid=0 2024-12-17T12:37:57,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:57,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439137471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:57,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741911_1087 (size=13085) 2024-12-17T12:37:57,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:57,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439137483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:57,490 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/45a8685432074ec387fc4b5298f484ab as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/45a8685432074ec387fc4b5298f484ab 2024-12-17T12:37:57,496 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/C of 502e77060db097ea5decbe44e66ef8e7 into 45a8685432074ec387fc4b5298f484ab(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:57,496 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:57,497 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/C, priority=13, startTime=1734439076997; duration=0sec 2024-12-17T12:37:57,497 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:57,497 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:C 2024-12-17T12:37:57,520 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/e7333808be094f16af821cade7e1a086 2024-12-17T12:37:57,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/a1481024f42f4b44af5d83da97153252 is 50, key is test_row_0/B:col10/1734439076538/Put/seqid=0 2024-12-17T12:37:57,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741912_1088 (size=12301) 2024-12-17T12:37:57,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:57,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439137774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:57,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:57,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439137787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-17T12:37:57,946 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/a1481024f42f4b44af5d83da97153252 2024-12-17T12:37:57,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/dd87bfd65d9a4b9ca80520e824f3bb99 is 50, key is test_row_0/C:col10/1734439076538/Put/seqid=0 2024-12-17T12:37:57,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741913_1089 (size=12301) 2024-12-17T12:37:57,961 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/dd87bfd65d9a4b9ca80520e824f3bb99 2024-12-17T12:37:57,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/e7333808be094f16af821cade7e1a086 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/e7333808be094f16af821cade7e1a086 2024-12-17T12:37:57,971 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/e7333808be094f16af821cade7e1a086, entries=150, sequenceid=355, filesize=12.0 K 2024-12-17T12:37:57,972 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/a1481024f42f4b44af5d83da97153252 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/a1481024f42f4b44af5d83da97153252 2024-12-17T12:37:57,977 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/a1481024f42f4b44af5d83da97153252, entries=150, sequenceid=355, filesize=12.0 K 2024-12-17T12:37:57,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/dd87bfd65d9a4b9ca80520e824f3bb99 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/dd87bfd65d9a4b9ca80520e824f3bb99 2024-12-17T12:37:57,989 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/dd87bfd65d9a4b9ca80520e824f3bb99, entries=150, sequenceid=355, filesize=12.0 K 2024-12-17T12:37:57,990 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 502e77060db097ea5decbe44e66ef8e7 in 880ms, sequenceid=355, compaction requested=false 2024-12-17T12:37:57,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:57,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:57,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-17T12:37:57,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-17T12:37:57,993 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-17T12:37:57,993 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1990 sec 2024-12-17T12:37:57,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.2040 sec 2024-12-17T12:37:58,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:37:58,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-17T12:37:58,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:58,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:58,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:58,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:58,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:58,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:58,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/7cb85dbc614a4d04953b29691b1a00d6 is 50, key is test_row_0/A:col10/1734439078281/Put/seqid=0 2024-12-17T12:37:58,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741914_1090 (size=12301) 2024-12-17T12:37:58,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:58,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439138310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:58,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:58,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439138311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:58,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:58,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439138413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:58,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:58,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439138414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:58,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:58,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439138616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:58,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:58,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439138616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:58,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/7cb85dbc614a4d04953b29691b1a00d6 2024-12-17T12:37:58,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/4c27039cfedb4a2a976c6934930f4f2c is 50, key is test_row_0/B:col10/1734439078281/Put/seqid=0 2024-12-17T12:37:58,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741915_1091 (size=12301) 2024-12-17T12:37:58,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-17T12:37:58,895 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-17T12:37:58,896 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:37:58,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-17T12:37:58,898 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:37:58,898 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:37:58,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:37:58,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 
2024-12-17T12:37:58,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:58,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439138920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:58,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:58,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439138920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:59,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-17T12:37:59,051 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:59,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-17T12:37:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:59,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:59,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:59,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:59,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/4c27039cfedb4a2a976c6934930f4f2c 2024-12-17T12:37:59,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/9a0904e526fd44b0847b4d4cf7ad9c1f is 50, key is test_row_0/C:col10/1734439078281/Put/seqid=0 2024-12-17T12:37:59,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741916_1092 (size=12301) 2024-12-17T12:37:59,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-17T12:37:59,203 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:59,204 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-17T12:37:59,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:59,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:59,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:59,204 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:59,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:59,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:59,356 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:59,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-17T12:37:59,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:59,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:59,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:59,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:59,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:59,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:59,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:59,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439139424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:59,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:37:59,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439139424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:59,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-17T12:37:59,509 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:59,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-17T12:37:59,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:59,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:37:59,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:59,510 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:37:59,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:59,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:37:59,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/9a0904e526fd44b0847b4d4cf7ad9c1f 2024-12-17T12:37:59,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/7cb85dbc614a4d04953b29691b1a00d6 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7cb85dbc614a4d04953b29691b1a00d6 2024-12-17T12:37:59,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7cb85dbc614a4d04953b29691b1a00d6, entries=150, sequenceid=373, filesize=12.0 K 2024-12-17T12:37:59,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/4c27039cfedb4a2a976c6934930f4f2c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/4c27039cfedb4a2a976c6934930f4f2c 2024-12-17T12:37:59,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/4c27039cfedb4a2a976c6934930f4f2c, entries=150, sequenceid=373, filesize=12.0 K 2024-12-17T12:37:59,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/9a0904e526fd44b0847b4d4cf7ad9c1f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/9a0904e526fd44b0847b4d4cf7ad9c1f 2024-12-17T12:37:59,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/9a0904e526fd44b0847b4d4cf7ad9c1f, entries=150, sequenceid=373, filesize=12.0 K 2024-12-17T12:37:59,544 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 502e77060db097ea5decbe44e66ef8e7 in 1262ms, sequenceid=373, compaction requested=true 2024-12-17T12:37:59,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:59,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
502e77060db097ea5decbe44e66ef8e7:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:37:59,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:59,545 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:59,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:37:59,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:59,545 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:59,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:37:59,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:59,546 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:59,546 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:59,546 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/B is initiating minor compaction (all files) 2024-12-17T12:37:59,546 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/A is initiating minor compaction (all files) 2024-12-17T12:37:59,546 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/B in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:59,546 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/A in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:59,547 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/be3cb812d6304479a80a8cd68f70cdf3, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/a1481024f42f4b44af5d83da97153252, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/4c27039cfedb4a2a976c6934930f4f2c] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.8 K 2024-12-17T12:37:59,547 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/243ef21cbcba4f46a7ad4ca901bc1a3a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/e7333808be094f16af821cade7e1a086, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7cb85dbc614a4d04953b29691b1a00d6] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.8 K 2024-12-17T12:37:59,547 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting be3cb812d6304479a80a8cd68f70cdf3, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734439075860 2024-12-17T12:37:59,547 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 243ef21cbcba4f46a7ad4ca901bc1a3a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734439075860 2024-12-17T12:37:59,547 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting a1481024f42f4b44af5d83da97153252, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734439076534 2024-12-17T12:37:59,547 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7333808be094f16af821cade7e1a086, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734439076534 2024-12-17T12:37:59,548 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7cb85dbc614a4d04953b29691b1a00d6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1734439077164 2024-12-17T12:37:59,548 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c27039cfedb4a2a976c6934930f4f2c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1734439077164 2024-12-17T12:37:59,557 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#B#compaction#78 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:59,557 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/c3ebb1538da84d9b8bdd4cbc1ad9d046 is 50, key is test_row_0/B:col10/1734439078281/Put/seqid=0 2024-12-17T12:37:59,560 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#A#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:37:59,560 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/cd22c9b487db46bcbf81ecb816bf2743 is 50, key is test_row_0/A:col10/1734439078281/Put/seqid=0 2024-12-17T12:37:59,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741918_1094 (size=13187) 2024-12-17T12:37:59,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741917_1093 (size=13187) 2024-12-17T12:37:59,662 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:37:59,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-17T12:37:59,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:37:59,663 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-17T12:37:59,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:37:59,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:59,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:37:59,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:59,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:37:59,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:37:59,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/cac5923711d94207bb5da7b571b34385 is 50, key is test_row_0/A:col10/1734439078301/Put/seqid=0 2024-12-17T12:37:59,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741919_1095 (size=12301) 2024-12-17T12:37:59,973 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/cd22c9b487db46bcbf81ecb816bf2743 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cd22c9b487db46bcbf81ecb816bf2743 2024-12-17T12:37:59,977 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/c3ebb1538da84d9b8bdd4cbc1ad9d046 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/c3ebb1538da84d9b8bdd4cbc1ad9d046 2024-12-17T12:37:59,982 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/A of 502e77060db097ea5decbe44e66ef8e7 into cd22c9b487db46bcbf81ecb816bf2743(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:59,982 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:59,982 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/A, priority=13, startTime=1734439079545; duration=0sec 2024-12-17T12:37:59,983 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:37:59,983 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:A 2024-12-17T12:37:59,983 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:37:59,988 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:37:59,988 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/C is initiating minor compaction (all files) 2024-12-17T12:37:59,988 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/C in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:37:59,988 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/45a8685432074ec387fc4b5298f484ab, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/dd87bfd65d9a4b9ca80520e824f3bb99, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/9a0904e526fd44b0847b4d4cf7ad9c1f] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.8 K 2024-12-17T12:37:59,989 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45a8685432074ec387fc4b5298f484ab, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734439075860 2024-12-17T12:37:59,990 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/B of 502e77060db097ea5decbe44e66ef8e7 into c3ebb1538da84d9b8bdd4cbc1ad9d046(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:37:59,990 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:37:59,990 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/B, priority=13, startTime=1734439079545; duration=0sec 2024-12-17T12:37:59,990 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:37:59,990 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:B 2024-12-17T12:37:59,990 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd87bfd65d9a4b9ca80520e824f3bb99, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734439076534 2024-12-17T12:37:59,991 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a0904e526fd44b0847b4d4cf7ad9c1f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1734439077164 2024-12-17T12:38:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-17T12:38:00,011 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#C#compaction#81 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:00,011 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/a664f261e04643ed8c7aa1038987296e is 50, key is test_row_0/C:col10/1734439078281/Put/seqid=0 2024-12-17T12:38:00,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741920_1096 (size=13187) 2024-12-17T12:38:00,029 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/a664f261e04643ed8c7aa1038987296e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/a664f261e04643ed8c7aa1038987296e 2024-12-17T12:38:00,037 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/C of 502e77060db097ea5decbe44e66ef8e7 into a664f261e04643ed8c7aa1038987296e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:00,037 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:00,037 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/C, priority=13, startTime=1734439079545; duration=0sec 2024-12-17T12:38:00,037 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:00,037 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:C 2024-12-17T12:38:00,072 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/cac5923711d94207bb5da7b571b34385 2024-12-17T12:38:00,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/d264a78eb65c4c26b586733141528dcc is 50, key is test_row_0/B:col10/1734439078301/Put/seqid=0 2024-12-17T12:38:00,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741921_1097 (size=12301) 2024-12-17T12:38:00,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:38:00,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:38:00,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:00,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439140444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:00,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439140446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:00,485 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/d264a78eb65c4c26b586733141528dcc 2024-12-17T12:38:00,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/86ab1660606b43ff836a32a0a2e11328 is 50, key is test_row_0/C:col10/1734439078301/Put/seqid=0 2024-12-17T12:38:00,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741922_1098 (size=12301) 2024-12-17T12:38:00,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:00,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439140547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:00,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:00,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439140547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:00,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:00,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35916 deadline: 1734439140618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:00,619 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., hostname=681c08bfdbdf,36491,1734439058372, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:38:00,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:00,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35932 deadline: 1734439140640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:00,642 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., hostname=681c08bfdbdf,36491,1734439058372, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:38:00,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:00,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35902 deadline: 1734439140661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:00,663 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8185 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., hostname=681c08bfdbdf,36491,1734439058372, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at 
org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:38:00,750 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:00,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439140749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:00,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:00,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439140749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:00,899 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/86ab1660606b43ff836a32a0a2e11328 2024-12-17T12:38:00,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/cac5923711d94207bb5da7b571b34385 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cac5923711d94207bb5da7b571b34385 2024-12-17T12:38:00,919 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cac5923711d94207bb5da7b571b34385, entries=150, sequenceid=394, filesize=12.0 K 2024-12-17T12:38:00,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/d264a78eb65c4c26b586733141528dcc as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d264a78eb65c4c26b586733141528dcc 2024-12-17T12:38:00,928 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d264a78eb65c4c26b586733141528dcc, entries=150, sequenceid=394, filesize=12.0 K 2024-12-17T12:38:00,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/86ab1660606b43ff836a32a0a2e11328 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/86ab1660606b43ff836a32a0a2e11328 2024-12-17T12:38:00,934 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/86ab1660606b43ff836a32a0a2e11328, entries=150, sequenceid=394, filesize=12.0 K 2024-12-17T12:38:00,935 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 502e77060db097ea5decbe44e66ef8e7 in 1273ms, sequenceid=394, compaction requested=false 2024-12-17T12:38:00,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:00,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:00,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-17T12:38:00,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-17T12:38:00,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-17T12:38:00,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0380 sec 2024-12-17T12:38:00,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 2.0420 sec 2024-12-17T12:38:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-17T12:38:01,005 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-17T12:38:01,005 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:01,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-17T12:38:01,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-17T12:38:01,007 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:01,007 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:01,007 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:38:01,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-17T12:38:01,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:38:01,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:01,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:38:01,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:01,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:38:01,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:01,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/77e5a3bdaf3d414d9c7fee18bc772a54 is 50, key is test_row_0/A:col10/1734439080445/Put/seqid=0 2024-12-17T12:38:01,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741923_1099 (size=12301) 2024-12-17T12:38:01,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:01,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439141077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:01,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439141080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-17T12:38:01,159 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,159 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-17T12:38:01,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:38:01,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,160 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
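[annotation] The second flush request (pid=24, subprocedure pid=25) was dispatched while MemStoreFlusher.0 is still flushing this region, so FlushRegionCallable bails out with "NOT flushing ... as already flushing" and reports the IOException above back to the master; the dispatcher keeps resending pid=25 until the in-flight flush finishes. The flush itself was requested by the test client ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of issuing such a flush with the standard HBase 2.x client API follows; the connection setup is assumed, not taken from the test code, which is not shown in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // The master turns this into a FlushTableProcedure with one
            // FlushRegionProcedure per region (pid=24 / pid=25 above), and the
            // call returns once that procedure completes, as HBaseAdmin$TableFuture
            // did earlier for procId 22.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
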
2024-12-17T12:38:01,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:01,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439141182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:01,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439141183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-17T12:38:01,312 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-17T12:38:01,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:38:01,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
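[annotation] The interleaved RegionTooBusyException warnings are HRegion.checkResources rejecting each Mutate while the memstore sits above its 512.0 K blocking limit; CallRunner returns the exception to the client, which is expected to back off and retry once the flush drains the memstore. A rough client-side sketch is below; it is a hypothetical helper, not the writer used by TestAcidGuarantees, and depending on client retry settings the exception may arrive wrapped rather than directly, so the sketch also checks the cause.

import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BackoffPut {

    /** Retries a single put with linear back-off while the region reports it is too busy. */
    static void putWithBackoff(Connection connection, byte[] row, byte[] value) throws Exception {
        try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(row)
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (IOException e) {
                    boolean tooBusy = e instanceof RegionTooBusyException
                        || e.getCause() instanceof RegionTooBusyException;
                    if (!tooBusy || attempt >= 5) {
                        throw e; // not a memstore-pressure rejection, or out of attempts
                    }
                    Thread.sleep(100L * attempt); // give the in-flight flush time to drain the memstore
                }
            }
        }
    }
}
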
2024-12-17T12:38:01,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:01,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439141384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:01,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439141386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,464 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-17T12:38:01,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:38:01,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
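[annotation] The "Over memstore limit=512.0 K" figure is the per-region blocking threshold: the memstore flush size multiplied by the block multiplier. This test run evidently uses a much smaller flush size than the production default of 128 MB; its exact values are not shown in this excerpt. The standard knobs, with purely illustrative values that would produce a 512 KB blocking limit, are sketched below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this many bytes
        // (production default is 128 MB; the value here is only illustrative).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Block new updates once the memstore reaches flush.size * multiplier;
        // that product is the "Over memstore limit" threshold logged above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 bytes = 512.0 K
    }
}
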
2024-12-17T12:38:01,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/77e5a3bdaf3d414d9c7fee18bc772a54 2024-12-17T12:38:01,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/271bf6699c574c92afd4ceaea62e472e is 50, key is test_row_0/B:col10/1734439080445/Put/seqid=0 2024-12-17T12:38:01,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741924_1100 (size=12301) 2024-12-17T12:38:01,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-17T12:38:01,617 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-17T12:38:01,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:38:01,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:01,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439141688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:01,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439141689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,770 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-17T12:38:01,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:38:01,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,771 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:01,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,897 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/271bf6699c574c92afd4ceaea62e472e 2024-12-17T12:38:01,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7f0980dae7504d90b776d75c5f6be73d is 50, key is test_row_0/C:col10/1734439080445/Put/seqid=0 2024-12-17T12:38:01,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741925_1101 (size=12301) 2024-12-17T12:38:01,923 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:01,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-17T12:38:01,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:38:01,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:01,923 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
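[annotation] Each flush writes one HFile per column family into the region's .tmp directory and then commits it into the matching family directory, which is why a single flush produces separate "Flushed memstore data size" / "Added ..." pairs for A, B and C. The table behind these paths therefore has three families named A, B and C; a minimal sketch of creating such a table is shown below (the real TestAcidGuarantees setup is not shown in this excerpt, so this is illustrative only).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateAcidTable {
    public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
            TableName name = TableName.valueOf("TestAcidGuarantees");
            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
            // One store per family, hence one flushed HFile per family per flush.
            for (String family : new String[] {"A", "B", "C"}) {
                builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
            }
            admin.createTable(builder.build());
        }
    }
}
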
2024-12-17T12:38:01,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:01,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:02,075 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:02,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-17T12:38:02,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:02,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:38:02,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:02,076 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:02,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:02,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:02,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-17T12:38:02,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:02,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439142194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:02,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:02,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439142194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:02,228 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:02,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-17T12:38:02,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:02,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. as already flushing 2024-12-17T12:38:02,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:02,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:02,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:02,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:02,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7f0980dae7504d90b776d75c5f6be73d 2024-12-17T12:38:02,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/77e5a3bdaf3d414d9c7fee18bc772a54 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/77e5a3bdaf3d414d9c7fee18bc772a54 2024-12-17T12:38:02,319 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/77e5a3bdaf3d414d9c7fee18bc772a54, entries=150, sequenceid=413, filesize=12.0 K 2024-12-17T12:38:02,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/271bf6699c574c92afd4ceaea62e472e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/271bf6699c574c92afd4ceaea62e472e 2024-12-17T12:38:02,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/271bf6699c574c92afd4ceaea62e472e, entries=150, sequenceid=413, filesize=12.0 K 2024-12-17T12:38:02,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7f0980dae7504d90b776d75c5f6be73d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7f0980dae7504d90b776d75c5f6be73d 2024-12-17T12:38:02,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7f0980dae7504d90b776d75c5f6be73d, entries=150, sequenceid=413, filesize=12.0 K 2024-12-17T12:38:02,336 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 502e77060db097ea5decbe44e66ef8e7 in 1283ms, sequenceid=413, compaction requested=true 2024-12-17T12:38:02,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:02,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
502e77060db097ea5decbe44e66ef8e7:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:02,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:02,337 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:02,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:02,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:02,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:02,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:02,337 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:02,338 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:02,338 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/A is initiating minor compaction (all files) 2024-12-17T12:38:02,338 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/A in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:38:02,338 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cd22c9b487db46bcbf81ecb816bf2743, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cac5923711d94207bb5da7b571b34385, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/77e5a3bdaf3d414d9c7fee18bc772a54] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.9 K 2024-12-17T12:38:02,339 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:02,339 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/B is initiating minor compaction (all files) 2024-12-17T12:38:02,339 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/B in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:02,339 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd22c9b487db46bcbf81ecb816bf2743, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1734439077164 2024-12-17T12:38:02,339 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/c3ebb1538da84d9b8bdd4cbc1ad9d046, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d264a78eb65c4c26b586733141528dcc, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/271bf6699c574c92afd4ceaea62e472e] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.9 K 2024-12-17T12:38:02,339 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting cac5923711d94207bb5da7b571b34385, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734439078301 2024-12-17T12:38:02,339 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting c3ebb1538da84d9b8bdd4cbc1ad9d046, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1734439077164 2024-12-17T12:38:02,340 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d264a78eb65c4c26b586733141528dcc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734439078301 2024-12-17T12:38:02,340 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 77e5a3bdaf3d414d9c7fee18bc772a54, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1734439080442 2024-12-17T12:38:02,340 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 271bf6699c574c92afd4ceaea62e472e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1734439080442 2024-12-17T12:38:02,349 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#B#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:02,350 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/069b3fdbf64d44cd97a97495152d2df2 is 50, key is test_row_0/B:col10/1734439080445/Put/seqid=0 2024-12-17T12:38:02,352 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#A#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:02,353 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/5a262dfa5819411e926ff015a7b5b474 is 50, key is test_row_0/A:col10/1734439080445/Put/seqid=0 2024-12-17T12:38:02,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741926_1102 (size=13289) 2024-12-17T12:38:02,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741927_1103 (size=13289) 2024-12-17T12:38:02,381 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:02,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-17T12:38:02,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:38:02,381 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-17T12:38:02,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:38:02,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:02,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:38:02,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:02,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:38:02,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:02,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/dcf407e243c04ce18345d11ce6ae8c8a is 50, key is test_row_0/A:col10/1734439081069/Put/seqid=0 2024-12-17T12:38:02,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741928_1104 (size=12301) 2024-12-17T12:38:02,767 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/069b3fdbf64d44cd97a97495152d2df2 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/069b3fdbf64d44cd97a97495152d2df2 2024-12-17T12:38:02,774 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/B of 502e77060db097ea5decbe44e66ef8e7 into 069b3fdbf64d44cd97a97495152d2df2(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:02,774 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:02,774 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/B, priority=13, startTime=1734439082337; duration=0sec 2024-12-17T12:38:02,774 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:02,774 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:B 2024-12-17T12:38:02,774 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:02,775 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:02,775 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/C is initiating minor compaction (all files) 2024-12-17T12:38:02,775 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/C in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:02,775 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/a664f261e04643ed8c7aa1038987296e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/86ab1660606b43ff836a32a0a2e11328, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7f0980dae7504d90b776d75c5f6be73d] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=36.9 K 2024-12-17T12:38:02,776 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting a664f261e04643ed8c7aa1038987296e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1734439077164 2024-12-17T12:38:02,776 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/5a262dfa5819411e926ff015a7b5b474 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5a262dfa5819411e926ff015a7b5b474 2024-12-17T12:38:02,776 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 86ab1660606b43ff836a32a0a2e11328, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734439078301 2024-12-17T12:38:02,777 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f0980dae7504d90b776d75c5f6be73d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1734439080442 2024-12-17T12:38:02,783 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/A of 502e77060db097ea5decbe44e66ef8e7 into 5a262dfa5819411e926ff015a7b5b474(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:02,783 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:02,783 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/A, priority=13, startTime=1734439082337; duration=0sec 2024-12-17T12:38:02,783 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:02,783 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:A 2024-12-17T12:38:02,786 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#C#compaction#90 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:02,787 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/4bee003832044e59b9b1f19a8ec7ac40 is 50, key is test_row_0/C:col10/1734439080445/Put/seqid=0 2024-12-17T12:38:02,792 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/dcf407e243c04ce18345d11ce6ae8c8a 2024-12-17T12:38:02,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741929_1105 (size=13289) 2024-12-17T12:38:02,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/6301e551e46f435aa1993e90bdec35c0 is 50, key is test_row_0/B:col10/1734439081069/Put/seqid=0 2024-12-17T12:38:02,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741930_1106 (size=12301) 2024-12-17T12:38:02,806 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/6301e551e46f435aa1993e90bdec35c0 2024-12-17T12:38:02,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7434737a258042ed90fece52f20413f5 is 50, key is test_row_0/C:col10/1734439081069/Put/seqid=0 2024-12-17T12:38:02,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741931_1107 (size=12301) 2024-12-17T12:38:03,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-17T12:38:03,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:38:03,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
as already flushing 2024-12-17T12:38:03,200 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/4bee003832044e59b9b1f19a8ec7ac40 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/4bee003832044e59b9b1f19a8ec7ac40 2024-12-17T12:38:03,210 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/C of 502e77060db097ea5decbe44e66ef8e7 into 4bee003832044e59b9b1f19a8ec7ac40(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:03,210 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:03,210 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/C, priority=13, startTime=1734439082337; duration=0sec 2024-12-17T12:38:03,210 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:03,210 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:C 2024-12-17T12:38:03,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:03,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:03,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439143214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:03,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439143214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:03,218 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7434737a258042ed90fece52f20413f5 2024-12-17T12:38:03,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/dcf407e243c04ce18345d11ce6ae8c8a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dcf407e243c04ce18345d11ce6ae8c8a 2024-12-17T12:38:03,227 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dcf407e243c04ce18345d11ce6ae8c8a, entries=150, sequenceid=433, filesize=12.0 K 2024-12-17T12:38:03,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 
{event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/6301e551e46f435aa1993e90bdec35c0 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/6301e551e46f435aa1993e90bdec35c0 2024-12-17T12:38:03,233 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/6301e551e46f435aa1993e90bdec35c0, entries=150, sequenceid=433, filesize=12.0 K 2024-12-17T12:38:03,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/7434737a258042ed90fece52f20413f5 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7434737a258042ed90fece52f20413f5 2024-12-17T12:38:03,239 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7434737a258042ed90fece52f20413f5, entries=150, sequenceid=433, filesize=12.0 K 2024-12-17T12:38:03,240 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 502e77060db097ea5decbe44e66ef8e7 in 859ms, sequenceid=433, compaction requested=false 2024-12-17T12:38:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:38:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-17T12:38:03,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-17T12:38:03,243 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-17T12:38:03,243 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2340 sec 2024-12-17T12:38:03,244 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 2.2370 sec 2024-12-17T12:38:03,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:38:03,318 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-17T12:38:03,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:38:03,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:03,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:38:03,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:03,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:38:03,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:03,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/bc5be528d88d472cbb20105dfe702b2f is 50, key is test_row_0/A:col10/1734439083317/Put/seqid=0 2024-12-17T12:38:03,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741932_1108 (size=12301) 2024-12-17T12:38:03,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:03,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439143338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:03,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:03,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439143339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:03,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:03,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439143440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:03,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:03,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439143442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:03,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:03,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439143643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:03,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:03,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439143644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:03,729 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/bc5be528d88d472cbb20105dfe702b2f 2024-12-17T12:38:03,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/d707ae2a09454d4bb2e831facd5c6829 is 50, key is test_row_0/B:col10/1734439083317/Put/seqid=0 2024-12-17T12:38:03,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741933_1109 (size=12301) 2024-12-17T12:38:03,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:03,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439143947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:03,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:03,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439143947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:04,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/d707ae2a09454d4bb2e831facd5c6829 2024-12-17T12:38:04,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/d8595f10a4de42ae9d1df37803224b30 is 50, key is test_row_0/C:col10/1734439083317/Put/seqid=0 2024-12-17T12:38:04,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741934_1110 (size=12301) 2024-12-17T12:38:04,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:04,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35918 deadline: 1734439144450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:04,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:04,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35922 deadline: 1734439144450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:04,552 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/d8595f10a4de42ae9d1df37803224b30 2024-12-17T12:38:04,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/bc5be528d88d472cbb20105dfe702b2f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/bc5be528d88d472cbb20105dfe702b2f 2024-12-17T12:38:04,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/bc5be528d88d472cbb20105dfe702b2f, entries=150, sequenceid=455, filesize=12.0 K 2024-12-17T12:38:04,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/d707ae2a09454d4bb2e831facd5c6829 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d707ae2a09454d4bb2e831facd5c6829 2024-12-17T12:38:04,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d707ae2a09454d4bb2e831facd5c6829, entries=150, sequenceid=455, filesize=12.0 K 2024-12-17T12:38:04,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/d8595f10a4de42ae9d1df37803224b30 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d8595f10a4de42ae9d1df37803224b30 2024-12-17T12:38:04,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d8595f10a4de42ae9d1df37803224b30, entries=150, sequenceid=455, filesize=12.0 K 2024-12-17T12:38:04,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 502e77060db097ea5decbe44e66ef8e7 in 1256ms, sequenceid=455, compaction requested=true 2024-12-17T12:38:04,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:04,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:04,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:04,574 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:04,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:04,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:04,574 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:04,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 502e77060db097ea5decbe44e66ef8e7:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:04,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:04,575 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:04,575 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:04,575 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/B is initiating minor compaction (all files) 2024-12-17T12:38:04,575 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/A is initiating minor compaction (all files) 2024-12-17T12:38:04,575 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/B in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
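Note on the RegionTooBusyException entries above: the region server rejects mutations while the region's memstore is over its blocking limit (512.0 K in this run), and callers are expected to back off until MemStoreFlusher.0 drains the memstore. Below is a minimal, hypothetical client-side retry loop around Table.put for that situation; the table, row, family and qualifier names are taken from the log, while the value, retry count and backoff are illustrative only, and in practice the HBase client already retries this exception internally.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnRegionTooBusy {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the log; the value is a placeholder.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // Memstore above the blocking limit; wait for the flush to drain it, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}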
2024-12-17T12:38:04,575 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/A in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:04,576 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/069b3fdbf64d44cd97a97495152d2df2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/6301e551e46f435aa1993e90bdec35c0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d707ae2a09454d4bb2e831facd5c6829] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=37.0 K 2024-12-17T12:38:04,576 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5a262dfa5819411e926ff015a7b5b474, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dcf407e243c04ce18345d11ce6ae8c8a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/bc5be528d88d472cbb20105dfe702b2f] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=37.0 K 2024-12-17T12:38:04,576 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 069b3fdbf64d44cd97a97495152d2df2, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1734439080442 2024-12-17T12:38:04,576 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a262dfa5819411e926ff015a7b5b474, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1734439080442 2024-12-17T12:38:04,576 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcf407e243c04ce18345d11ce6ae8c8a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1734439081069 2024-12-17T12:38:04,576 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 6301e551e46f435aa1993e90bdec35c0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1734439081069 2024-12-17T12:38:04,577 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc5be528d88d472cbb20105dfe702b2f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1734439083213 2024-12-17T12:38:04,577 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d707ae2a09454d4bb2e831facd5c6829, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1734439083213 
2024-12-17T12:38:04,584 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#A#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:04,584 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/59a86faa8e714815b036420faa2978f2 is 50, key is test_row_0/A:col10/1734439083317/Put/seqid=0 2024-12-17T12:38:04,588 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#B#compaction#97 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:04,589 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/eccef23c26ef4e9dbe61a2504456f02d is 50, key is test_row_0/B:col10/1734439083317/Put/seqid=0 2024-12-17T12:38:04,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741935_1111 (size=13391) 2024-12-17T12:38:04,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741936_1112 (size=13391) 2024-12-17T12:38:04,599 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/59a86faa8e714815b036420faa2978f2 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/59a86faa8e714815b036420faa2978f2 2024-12-17T12:38:04,605 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/A of 502e77060db097ea5decbe44e66ef8e7 into 59a86faa8e714815b036420faa2978f2(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:04,605 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:04,605 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/A, priority=13, startTime=1734439084574; duration=0sec 2024-12-17T12:38:04,605 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:04,605 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:A 2024-12-17T12:38:04,605 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:04,606 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:04,606 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 502e77060db097ea5decbe44e66ef8e7/C is initiating minor compaction (all files) 2024-12-17T12:38:04,606 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 502e77060db097ea5decbe44e66ef8e7/C in TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:04,606 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/4bee003832044e59b9b1f19a8ec7ac40, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7434737a258042ed90fece52f20413f5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d8595f10a4de42ae9d1df37803224b30] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp, totalSize=37.0 K 2024-12-17T12:38:04,607 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4bee003832044e59b9b1f19a8ec7ac40, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1734439080442 2024-12-17T12:38:04,607 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7434737a258042ed90fece52f20413f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1734439081069 2024-12-17T12:38:04,607 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8595f10a4de42ae9d1df37803224b30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1734439083213 2024-12-17T12:38:04,614 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 502e77060db097ea5decbe44e66ef8e7#C#compaction#98 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:04,615 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/45a3d2cc724a4ab5b8e7fcace2cee82d is 50, key is test_row_0/C:col10/1734439083317/Put/seqid=0 2024-12-17T12:38:04,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741937_1113 (size=13391) 2024-12-17T12:38:04,998 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/eccef23c26ef4e9dbe61a2504456f02d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/eccef23c26ef4e9dbe61a2504456f02d 2024-12-17T12:38:05,003 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/B of 502e77060db097ea5decbe44e66ef8e7 into eccef23c26ef4e9dbe61a2504456f02d(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:05,004 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:05,004 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/B, priority=13, startTime=1734439084574; duration=0sec 2024-12-17T12:38:05,004 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:05,004 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:B 2024-12-17T12:38:05,025 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/45a3d2cc724a4ab5b8e7fcace2cee82d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/45a3d2cc724a4ab5b8e7fcace2cee82d 2024-12-17T12:38:05,030 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 502e77060db097ea5decbe44e66ef8e7/C of 502e77060db097ea5decbe44e66ef8e7 into 45a3d2cc724a4ab5b8e7fcace2cee82d(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
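The compaction activity above is system-triggered: each flush leaves a ~12 K file per store, ExploringCompactionPolicy selects the three eligible files, and they are rewritten into a single ~13.1 K file per store. For reference, a minimal sketch of driving the same flush and compaction explicitly through the Admin API follows; the table name comes from the log, while the connection setup and the one-second polling interval are assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompact {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(tn);          // persists memstores, as the FlushTableProcedure does above
      admin.majorCompact(tn);   // asynchronous request to merge all store files per store
      while (admin.getCompactionState(tn) != CompactionState.NONE) {
        Thread.sleep(1000);     // poll until the region server reports no compaction running
      }
    }
  }
}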
2024-12-17T12:38:05,030 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:05,030 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7., storeName=502e77060db097ea5decbe44e66ef8e7/C, priority=13, startTime=1734439084574; duration=0sec 2024-12-17T12:38:05,030 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:05,030 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 502e77060db097ea5decbe44e66ef8e7:C 2024-12-17T12:38:05,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-17T12:38:05,111 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-17T12:38:05,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:05,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-17T12:38:05,113 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:05,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-17T12:38:05,113 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:05,114 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:05,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-17T12:38:05,265 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:05,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-17T12:38:05,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:38:05,266 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-17T12:38:05,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:38:05,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:05,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:38:05,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:05,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:38:05,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:05,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/bd82dde3abaf4554ab5288da4874ce51 is 50, key is test_row_0/A:col10/1734439083336/Put/seqid=0 2024-12-17T12:38:05,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741938_1114 (size=12301) 2024-12-17T12:38:05,313 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0072c84e to 127.0.0.1:59557 2024-12-17T12:38:05,313 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3909b87d to 127.0.0.1:59557 2024-12-17T12:38:05,313 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c5c7344 to 127.0.0.1:59557 2024-12-17T12:38:05,314 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:05,314 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:05,314 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:05,314 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c7b4e84 to 127.0.0.1:59557 2024-12-17T12:38:05,314 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:05,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-17T12:38:05,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:38:05,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
as already flushing 2024-12-17T12:38:05,462 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49b4be90 to 127.0.0.1:59557 2024-12-17T12:38:05,462 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:05,466 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ddb4a72 to 127.0.0.1:59557 2024-12-17T12:38:05,466 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:05,677 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/bd82dde3abaf4554ab5288da4874ce51 2024-12-17T12:38:05,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/cc032affc5f745bcba7d866e9ba0faf9 is 50, key is test_row_0/B:col10/1734439083336/Put/seqid=0 2024-12-17T12:38:05,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741939_1115 (size=12301) 2024-12-17T12:38:05,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-17T12:38:06,098 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/cc032affc5f745bcba7d866e9ba0faf9 2024-12-17T12:38:06,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/0f1c94412a4041dd8ee937ab733c10e7 is 50, key is test_row_0/C:col10/1734439083336/Put/seqid=0 2024-12-17T12:38:06,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741940_1116 (size=12301) 2024-12-17T12:38:06,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-17T12:38:06,518 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/0f1c94412a4041dd8ee937ab733c10e7 2024-12-17T12:38:06,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/bd82dde3abaf4554ab5288da4874ce51 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/bd82dde3abaf4554ab5288da4874ce51 2024-12-17T12:38:06,536 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/bd82dde3abaf4554ab5288da4874ce51, entries=150, sequenceid=475, filesize=12.0 K 2024-12-17T12:38:06,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/cc032affc5f745bcba7d866e9ba0faf9 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/cc032affc5f745bcba7d866e9ba0faf9 2024-12-17T12:38:06,542 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/cc032affc5f745bcba7d866e9ba0faf9, entries=150, sequenceid=475, filesize=12.0 K 2024-12-17T12:38:06,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/0f1c94412a4041dd8ee937ab733c10e7 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/0f1c94412a4041dd8ee937ab733c10e7 2024-12-17T12:38:06,547 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/0f1c94412a4041dd8ee937ab733c10e7, entries=150, sequenceid=475, filesize=12.0 K 2024-12-17T12:38:06,548 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=13.42 KB/13740 for 502e77060db097ea5decbe44e66ef8e7 in 1283ms, sequenceid=475, compaction requested=false 2024-12-17T12:38:06,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:06,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
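The CompactingMemStore and CompactionPipeline entries in this run indicate the table's column families use in-memory compaction, so each flush first swaps the active segment into the pipeline before writing it out. A hedged sketch of declaring such a family follows; the BASIC policy is an assumption made only to illustrate the builder call, not a value confirmed by this log.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactingMemStoreTableDescriptor {
  // Builds a descriptor for a table with families A, B and C (as in the log),
  // each configured for in-memory compaction; the BASIC policy is an assumption.
  static TableDescriptor build() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
          .build());
    }
    return builder.build();
  }
}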
2024-12-17T12:38:06,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-17T12:38:06,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-17T12:38:06,550 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-17T12:38:06,550 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4350 sec 2024-12-17T12:38:06,551 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.4390 sec 2024-12-17T12:38:06,884 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-17T12:38:07,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-17T12:38:07,220 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-17T12:38:10,659 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x336619a6 to 127.0.0.1:59557 2024-12-17T12:38:10,660 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:10,706 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62083522 to 127.0.0.1:59557 2024-12-17T12:38:10,706 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:10,735 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x035ff1ca to 127.0.0.1:59557 2024-12-17T12:38:10,736 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 96 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 127 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8189 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7967 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3572 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10713 rows 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3581 2024-12-17T12:38:10,736 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10739 rows 2024-12-17T12:38:10,736 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-17T12:38:10,736 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x307f6610 to 127.0.0.1:59557 2024-12-17T12:38:10,736 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:10,739 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-17T12:38:10,742 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-17T12:38:10,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:10,750 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439090749"}]},"ts":"1734439090749"} 2024-12-17T12:38:10,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-17T12:38:10,751 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-17T12:38:10,759 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-17T12:38:10,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-17T12:38:10,765 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=30, ppid=29, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=502e77060db097ea5decbe44e66ef8e7, UNASSIGN}] 2024-12-17T12:38:10,766 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=30, ppid=29, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=502e77060db097ea5decbe44e66ef8e7, UNASSIGN 2024-12-17T12:38:10,766 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=30 updating hbase:meta row=502e77060db097ea5decbe44e66ef8e7, regionState=CLOSING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:10,767 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-17T12:38:10,767 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; CloseRegionProcedure 502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:38:10,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-17T12:38:10,924 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:10,927 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] handler.UnassignRegionHandler(124): Close 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:38:10,927 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-17T12:38:10,929 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1681): Closing 502e77060db097ea5decbe44e66ef8e7, disabling compactions & flushes 2024-12-17T12:38:10,929 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:10,929 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 2024-12-17T12:38:10,929 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. after waiting 0 ms 2024-12-17T12:38:10,929 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
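The disable sequence above (DisableTableProcedure, CloseTableRegionsProcedure, TransitRegionStateProcedure and the region close with a final flush) is what Admin.disableTable drives on the master. A minimal client-side sketch is below; the deleteTable call is an assumption about the subsequent cleanup and does not appear in this excerpt.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(tn)) {
        admin.disableTable(tn);   // triggers the DisableTableProcedure / region close seen above
      }
      admin.deleteTable(tn);      // assumed follow-up cleanup; not shown in this log excerpt
    }
  }
}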
2024-12-17T12:38:10,929 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(2837): Flushing 502e77060db097ea5decbe44e66ef8e7 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-17T12:38:10,930 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=A 2024-12-17T12:38:10,930 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:10,930 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=B 2024-12-17T12:38:10,930 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:10,930 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 502e77060db097ea5decbe44e66ef8e7, store=C 2024-12-17T12:38:10,931 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:10,940 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/8df4bf64564f4390a1615a4b70ecfa21 is 50, key is test_row_0/A:col10/1734439090734/Put/seqid=0 2024-12-17T12:38:10,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741941_1117 (size=12301) 2024-12-17T12:38:11,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-17T12:38:11,345 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/8df4bf64564f4390a1615a4b70ecfa21 2024-12-17T12:38:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-17T12:38:11,356 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/7f13738a9fbb433680c8b81f7e2dfa24 is 50, key is test_row_0/B:col10/1734439090734/Put/seqid=0 2024-12-17T12:38:11,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741942_1118 (size=12301) 2024-12-17T12:38:11,763 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 
{event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/7f13738a9fbb433680c8b81f7e2dfa24 2024-12-17T12:38:11,778 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/e946500b78d14093bd5108c3bacb995f is 50, key is test_row_0/C:col10/1734439090734/Put/seqid=0 2024-12-17T12:38:11,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741943_1119 (size=12301) 2024-12-17T12:38:11,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-17T12:38:12,184 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/e946500b78d14093bd5108c3bacb995f 2024-12-17T12:38:12,194 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/A/8df4bf64564f4390a1615a4b70ecfa21 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/8df4bf64564f4390a1615a4b70ecfa21 2024-12-17T12:38:12,202 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/8df4bf64564f4390a1615a4b70ecfa21, entries=150, sequenceid=483, filesize=12.0 K 2024-12-17T12:38:12,203 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/B/7f13738a9fbb433680c8b81f7e2dfa24 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/7f13738a9fbb433680c8b81f7e2dfa24 2024-12-17T12:38:12,209 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/7f13738a9fbb433680c8b81f7e2dfa24, entries=150, sequenceid=483, filesize=12.0 K 2024-12-17T12:38:12,210 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/.tmp/C/e946500b78d14093bd5108c3bacb995f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/e946500b78d14093bd5108c3bacb995f 2024-12-17T12:38:12,217 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/e946500b78d14093bd5108c3bacb995f, entries=150, sequenceid=483, filesize=12.0 K 2024-12-17T12:38:12,218 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 502e77060db097ea5decbe44e66ef8e7 in 1289ms, sequenceid=483, compaction requested=true 2024-12-17T12:38:12,218 DEBUG [StoreCloser-TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/0bc642b72715464cac701a29186bd621, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/21b23c9c54c74687b7c662728c32a240, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/150079d0e09848c4ab576b7152782072, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/b43758a7a76d476984e03720925c0120, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/19961d3360f94cb280f28249213e1ac8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/87b0c6d178b34880b577873124dff88d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5599beca86684aedbd8e953af032c03a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5758a35fd363413f9cbef2f0e4acbe8b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/90999d8e166d4e41b5cedbd8bc188ca0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/c06293551ffb4d82b30c438843da348c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/44d8695b1d124c17a9327487ca20bf0e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/4af00000a5d24b40b78e81303f3f57f9, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/33d8004ba8754affbfae3628602aa3b0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/b560ecc34ba1453b8d095aa2363b8b48, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cfeef11b96964351b48aaface1b1cb83, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dbe694f38fe94e96aa216066177221e3, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ef0b1bb1c7184e8290ead489a09c132d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/8106759b128a4083a55da4c1663517bb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ec4b1d0e21704903b3da83654f26d2d9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/4af037d188164a58a11e75238e4d2508, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/a39acdd236a04cb5905c702a310c215c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7f4d016efb9f4683980fd22a938634cd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/243ef21cbcba4f46a7ad4ca901bc1a3a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/1347f9e6dbfd4ed48aa2717e82c12453, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/e7333808be094f16af821cade7e1a086, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cd22c9b487db46bcbf81ecb816bf2743, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7cb85dbc614a4d04953b29691b1a00d6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cac5923711d94207bb5da7b571b34385, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5a262dfa5819411e926ff015a7b5b474, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/77e5a3bdaf3d414d9c7fee18bc772a54, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dcf407e243c04ce18345d11ce6ae8c8a, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/bc5be528d88d472cbb20105dfe702b2f] to archive 2024-12-17T12:38:12,222 DEBUG [StoreCloser-TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-17T12:38:12,229 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/b43758a7a76d476984e03720925c0120 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/b43758a7a76d476984e03720925c0120 2024-12-17T12:38:12,229 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/19961d3360f94cb280f28249213e1ac8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/19961d3360f94cb280f28249213e1ac8 2024-12-17T12:38:12,229 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/0bc642b72715464cac701a29186bd621 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/0bc642b72715464cac701a29186bd621 2024-12-17T12:38:12,229 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/150079d0e09848c4ab576b7152782072 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/150079d0e09848c4ab576b7152782072 2024-12-17T12:38:12,229 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5599beca86684aedbd8e953af032c03a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5599beca86684aedbd8e953af032c03a 2024-12-17T12:38:12,229 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5758a35fd363413f9cbef2f0e4acbe8b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5758a35fd363413f9cbef2f0e4acbe8b 2024-12-17T12:38:12,229 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/21b23c9c54c74687b7c662728c32a240 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/21b23c9c54c74687b7c662728c32a240 2024-12-17T12:38:12,229 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/87b0c6d178b34880b577873124dff88d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/87b0c6d178b34880b577873124dff88d 2024-12-17T12:38:12,231 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/90999d8e166d4e41b5cedbd8bc188ca0 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/90999d8e166d4e41b5cedbd8bc188ca0 2024-12-17T12:38:12,231 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/4af00000a5d24b40b78e81303f3f57f9 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/4af00000a5d24b40b78e81303f3f57f9 2024-12-17T12:38:12,231 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/44d8695b1d124c17a9327487ca20bf0e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/44d8695b1d124c17a9327487ca20bf0e 2024-12-17T12:38:12,231 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/33d8004ba8754affbfae3628602aa3b0 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/33d8004ba8754affbfae3628602aa3b0 2024-12-17T12:38:12,231 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cfeef11b96964351b48aaface1b1cb83 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cfeef11b96964351b48aaface1b1cb83 2024-12-17T12:38:12,231 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dbe694f38fe94e96aa216066177221e3 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dbe694f38fe94e96aa216066177221e3 2024-12-17T12:38:12,231 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/c06293551ffb4d82b30c438843da348c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/c06293551ffb4d82b30c438843da348c 2024-12-17T12:38:12,233 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/8106759b128a4083a55da4c1663517bb to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/8106759b128a4083a55da4c1663517bb 2024-12-17T12:38:12,233 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ef0b1bb1c7184e8290ead489a09c132d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ef0b1bb1c7184e8290ead489a09c132d 2024-12-17T12:38:12,233 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ec4b1d0e21704903b3da83654f26d2d9 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/ec4b1d0e21704903b3da83654f26d2d9 2024-12-17T12:38:12,233 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/a39acdd236a04cb5905c702a310c215c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/a39acdd236a04cb5905c702a310c215c 2024-12-17T12:38:12,233 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7f4d016efb9f4683980fd22a938634cd to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7f4d016efb9f4683980fd22a938634cd 2024-12-17T12:38:12,234 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/243ef21cbcba4f46a7ad4ca901bc1a3a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/243ef21cbcba4f46a7ad4ca901bc1a3a 2024-12-17T12:38:12,234 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/4af037d188164a58a11e75238e4d2508 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/4af037d188164a58a11e75238e4d2508 2024-12-17T12:38:12,235 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/1347f9e6dbfd4ed48aa2717e82c12453 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/1347f9e6dbfd4ed48aa2717e82c12453 2024-12-17T12:38:12,235 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7cb85dbc614a4d04953b29691b1a00d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/7cb85dbc614a4d04953b29691b1a00d6 2024-12-17T12:38:12,235 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/e7333808be094f16af821cade7e1a086 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/e7333808be094f16af821cade7e1a086 2024-12-17T12:38:12,235 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cd22c9b487db46bcbf81ecb816bf2743 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cd22c9b487db46bcbf81ecb816bf2743 2024-12-17T12:38:12,236 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/77e5a3bdaf3d414d9c7fee18bc772a54 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/77e5a3bdaf3d414d9c7fee18bc772a54 2024-12-17T12:38:12,236 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5a262dfa5819411e926ff015a7b5b474 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/5a262dfa5819411e926ff015a7b5b474 2024-12-17T12:38:12,236 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cac5923711d94207bb5da7b571b34385 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/cac5923711d94207bb5da7b571b34385 2024-12-17T12:38:12,237 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dcf407e243c04ce18345d11ce6ae8c8a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/dcf407e243c04ce18345d11ce6ae8c8a 2024-12-17T12:38:12,237 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/bc5be528d88d472cbb20105dfe702b2f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/bc5be528d88d472cbb20105dfe702b2f 2024-12-17T12:38:12,240 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/b560ecc34ba1453b8d095aa2363b8b48 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/b560ecc34ba1453b8d095aa2363b8b48 2024-12-17T12:38:12,250 DEBUG [StoreCloser-TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8b9aa4f0809f436e9f18950d2401ebf8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ecbb4e16208845bc9b9d512d040c7ef2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/b5066111b3a541558cdd1b73081771ba, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/216bfe23dabc45daade9d26eadfe9a5a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/04d8f0355ebd423fb6c17594755de32e, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/760d8912c1c943f8b0b8b37cbf5c2d05, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/0d8a07a1f3184c388f94dea2cfc8a209, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/74757e060b1c4500a25caaf7fc6d81b7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/57968acc142d40488916b94031faee6a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ca0e8ee18bf3402394254cd37b42e01b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8886c8ecf5194894b4a5310b953da21f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ffed733ce8cf4f14851563a97cdf5aab, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/7d34b52045204eb6ac14446483f1a6d7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/98d15c66e83643099e52749ac42a1fe4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d5b28b2981cb4dbf9433dbcca3fa3108, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8d888a85fe904338af070b42ccf1d080, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/2e014e5ada034cb7a1090d0f1f56cf50, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/9aaa5d09c17e46049befe677e4fac467, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/011610c4a8c74897a47058143249e8a4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/c02abcacd306427d8fb14470406432f9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/24b6425720574c15a3e42019508d6276, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8a2dfa0ba1b0413fb460e0bf8669c1e1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/be3cb812d6304479a80a8cd68f70cdf3, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/1a6c175735be45ae879fc2e121d72044, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/a1481024f42f4b44af5d83da97153252, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/c3ebb1538da84d9b8bdd4cbc1ad9d046, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/4c27039cfedb4a2a976c6934930f4f2c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d264a78eb65c4c26b586733141528dcc, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/069b3fdbf64d44cd97a97495152d2df2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/271bf6699c574c92afd4ceaea62e472e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/6301e551e46f435aa1993e90bdec35c0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d707ae2a09454d4bb2e831facd5c6829] to archive 2024-12-17T12:38:12,251 DEBUG [StoreCloser-TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-17T12:38:12,254 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ecbb4e16208845bc9b9d512d040c7ef2 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ecbb4e16208845bc9b9d512d040c7ef2 2024-12-17T12:38:12,254 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/216bfe23dabc45daade9d26eadfe9a5a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/216bfe23dabc45daade9d26eadfe9a5a 2024-12-17T12:38:12,254 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/b5066111b3a541558cdd1b73081771ba to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/b5066111b3a541558cdd1b73081771ba 2024-12-17T12:38:12,254 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/760d8912c1c943f8b0b8b37cbf5c2d05 to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/760d8912c1c943f8b0b8b37cbf5c2d05 2024-12-17T12:38:12,254 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/04d8f0355ebd423fb6c17594755de32e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/04d8f0355ebd423fb6c17594755de32e 2024-12-17T12:38:12,254 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/0d8a07a1f3184c388f94dea2cfc8a209 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/0d8a07a1f3184c388f94dea2cfc8a209 2024-12-17T12:38:12,254 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8b9aa4f0809f436e9f18950d2401ebf8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8b9aa4f0809f436e9f18950d2401ebf8 2024-12-17T12:38:12,254 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/74757e060b1c4500a25caaf7fc6d81b7 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/74757e060b1c4500a25caaf7fc6d81b7 2024-12-17T12:38:12,255 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ca0e8ee18bf3402394254cd37b42e01b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ca0e8ee18bf3402394254cd37b42e01b 2024-12-17T12:38:12,255 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/57968acc142d40488916b94031faee6a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/57968acc142d40488916b94031faee6a 2024-12-17T12:38:12,256 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/98d15c66e83643099e52749ac42a1fe4 to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/98d15c66e83643099e52749ac42a1fe4 2024-12-17T12:38:12,256 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8886c8ecf5194894b4a5310b953da21f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8886c8ecf5194894b4a5310b953da21f 2024-12-17T12:38:12,256 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d5b28b2981cb4dbf9433dbcca3fa3108 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d5b28b2981cb4dbf9433dbcca3fa3108 2024-12-17T12:38:12,256 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/7d34b52045204eb6ac14446483f1a6d7 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/7d34b52045204eb6ac14446483f1a6d7 2024-12-17T12:38:12,256 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ffed733ce8cf4f14851563a97cdf5aab to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/ffed733ce8cf4f14851563a97cdf5aab 2024-12-17T12:38:12,256 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8d888a85fe904338af070b42ccf1d080 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8d888a85fe904338af070b42ccf1d080 2024-12-17T12:38:12,257 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/2e014e5ada034cb7a1090d0f1f56cf50 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/2e014e5ada034cb7a1090d0f1f56cf50 2024-12-17T12:38:12,257 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/011610c4a8c74897a47058143249e8a4 to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/011610c4a8c74897a47058143249e8a4 2024-12-17T12:38:12,258 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/9aaa5d09c17e46049befe677e4fac467 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/9aaa5d09c17e46049befe677e4fac467 2024-12-17T12:38:12,258 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/be3cb812d6304479a80a8cd68f70cdf3 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/be3cb812d6304479a80a8cd68f70cdf3 2024-12-17T12:38:12,258 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/c02abcacd306427d8fb14470406432f9 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/c02abcacd306427d8fb14470406432f9 2024-12-17T12:38:12,258 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8a2dfa0ba1b0413fb460e0bf8669c1e1 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/8a2dfa0ba1b0413fb460e0bf8669c1e1 2024-12-17T12:38:12,258 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/24b6425720574c15a3e42019508d6276 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/24b6425720574c15a3e42019508d6276 2024-12-17T12:38:12,260 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/a1481024f42f4b44af5d83da97153252 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/a1481024f42f4b44af5d83da97153252 2024-12-17T12:38:12,260 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/1a6c175735be45ae879fc2e121d72044 to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/1a6c175735be45ae879fc2e121d72044 2024-12-17T12:38:12,260 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/069b3fdbf64d44cd97a97495152d2df2 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/069b3fdbf64d44cd97a97495152d2df2 2024-12-17T12:38:12,260 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/4c27039cfedb4a2a976c6934930f4f2c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/4c27039cfedb4a2a976c6934930f4f2c 2024-12-17T12:38:12,261 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/271bf6699c574c92afd4ceaea62e472e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/271bf6699c574c92afd4ceaea62e472e 2024-12-17T12:38:12,261 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/c3ebb1538da84d9b8bdd4cbc1ad9d046 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/c3ebb1538da84d9b8bdd4cbc1ad9d046 2024-12-17T12:38:12,261 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/6301e551e46f435aa1993e90bdec35c0 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/6301e551e46f435aa1993e90bdec35c0 2024-12-17T12:38:12,261 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d264a78eb65c4c26b586733141528dcc to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d264a78eb65c4c26b586733141528dcc 2024-12-17T12:38:12,261 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d707ae2a09454d4bb2e831facd5c6829 to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/d707ae2a09454d4bb2e831facd5c6829 2024-12-17T12:38:12,263 DEBUG [StoreCloser-TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/eb9ed69984714c159a7cf366fcd86e56, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/a47c60cb4af8431db47a30a09c2feba5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/454b8cbd62df49d287c823cb751a7d9b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/725ed8e6c3384b83961e2c7a74485b92, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/6d9bc5840abf42859b824d5963337d04, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ff3a685f36d84e76b1f5c970db56e2be, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/44481a16d4f64e3f8d5452eaa8098926, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d4391d361e6e443e858b8cb0c0cb6701, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/939be3926f9842c0845cb6ae7b0f734a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/431e4ec352cf40c9806938a5bfd49b2b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/6032712651c6482a865f23372ae6b805, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ef47535811454a9abd7051e622515da0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/39874262d1564854b1e1e2c47b55326e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/bebc2ec80fa9459da84143db7497e549, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/4dd6bbbaf4f046cdac9e64044a8351b8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/31229da7e34045ee9614d75bd6625da6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/c07fd6ce56df4383b946be742327fdaf, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/df5651d7cda04e6db7bcf471ff98e73b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/e513e291c287428eb3b7ee23863d60f6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/3a851fb53cb14a8a99912a35997e7775, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7b8b8d6ce5e44fa3912903cdedbff12a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/cb98ccb882e5415cb08d7e8341d0e608, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/45a8685432074ec387fc4b5298f484ab, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7ecf3e0f00ac4e15a9df9f2451e8b294, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/dd87bfd65d9a4b9ca80520e824f3bb99, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/a664f261e04643ed8c7aa1038987296e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/9a0904e526fd44b0847b4d4cf7ad9c1f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/86ab1660606b43ff836a32a0a2e11328, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/4bee003832044e59b9b1f19a8ec7ac40, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7f0980dae7504d90b776d75c5f6be73d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7434737a258042ed90fece52f20413f5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d8595f10a4de42ae9d1df37803224b30] to archive 2024-12-17T12:38:12,264 DEBUG [StoreCloser-TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
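Each StoreCloser entry above moves a compacted store file from the region's data directory to the same relative location under archive/, so nothing is deleted outright while the region closes. The following is a hypothetical helper, not the HFileArchiver implementation, that only makes the source-to-archive path mapping visible in these entries explicit; the example paths are taken from the log.

```java
import org.apache.hadoop.fs.Path;

// Hypothetical helper mirroring the path rewrite seen in the log:
//   <root>/data/default/<table>/<region>/<cf>/<hfile>
//     -> <root>/archive/data/default/<table>/<region>/<cf>/<hfile>
public final class ArchivePathSketch {
    static Path toArchivePath(Path rootDir, Path storeFile) {
        // Store file path relative to the cluster root directory.
        String relative = storeFile.toUri().getPath()
                .substring(rootDir.toUri().getPath().length() + 1);
        // The archive tree keeps the same data/default/<table>/... layout.
        return new Path(new Path(rootDir, "archive"), relative);
    }

    public static void main(String[] args) {
        Path root = new Path(
                "hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9");
        Path storeFile = new Path(root,
                "data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/725ed8e6c3384b83961e2c7a74485b92");
        // Prints the archive location that the corresponding log entry reports.
        System.out.println(toArchivePath(root, storeFile));
    }
}
```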
2024-12-17T12:38:12,266 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/725ed8e6c3384b83961e2c7a74485b92 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/725ed8e6c3384b83961e2c7a74485b92 2024-12-17T12:38:12,266 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/a47c60cb4af8431db47a30a09c2feba5 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/a47c60cb4af8431db47a30a09c2feba5 2024-12-17T12:38:12,266 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/44481a16d4f64e3f8d5452eaa8098926 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/44481a16d4f64e3f8d5452eaa8098926 2024-12-17T12:38:12,266 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/6d9bc5840abf42859b824d5963337d04 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/6d9bc5840abf42859b824d5963337d04 2024-12-17T12:38:12,266 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ff3a685f36d84e76b1f5c970db56e2be to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ff3a685f36d84e76b1f5c970db56e2be 2024-12-17T12:38:12,266 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/454b8cbd62df49d287c823cb751a7d9b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/454b8cbd62df49d287c823cb751a7d9b 2024-12-17T12:38:12,267 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/eb9ed69984714c159a7cf366fcd86e56 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/eb9ed69984714c159a7cf366fcd86e56 2024-12-17T12:38:12,268 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ef47535811454a9abd7051e622515da0 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/ef47535811454a9abd7051e622515da0 2024-12-17T12:38:12,268 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/39874262d1564854b1e1e2c47b55326e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/39874262d1564854b1e1e2c47b55326e 2024-12-17T12:38:12,268 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/431e4ec352cf40c9806938a5bfd49b2b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/431e4ec352cf40c9806938a5bfd49b2b 2024-12-17T12:38:12,269 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d4391d361e6e443e858b8cb0c0cb6701 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d4391d361e6e443e858b8cb0c0cb6701 2024-12-17T12:38:12,269 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/939be3926f9842c0845cb6ae7b0f734a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/939be3926f9842c0845cb6ae7b0f734a 2024-12-17T12:38:12,269 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/bebc2ec80fa9459da84143db7497e549 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/bebc2ec80fa9459da84143db7497e549 2024-12-17T12:38:12,269 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/6032712651c6482a865f23372ae6b805 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/6032712651c6482a865f23372ae6b805 2024-12-17T12:38:12,270 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/df5651d7cda04e6db7bcf471ff98e73b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/df5651d7cda04e6db7bcf471ff98e73b 2024-12-17T12:38:12,270 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7b8b8d6ce5e44fa3912903cdedbff12a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7b8b8d6ce5e44fa3912903cdedbff12a 2024-12-17T12:38:12,270 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/31229da7e34045ee9614d75bd6625da6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/31229da7e34045ee9614d75bd6625da6 2024-12-17T12:38:12,271 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/3a851fb53cb14a8a99912a35997e7775 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/3a851fb53cb14a8a99912a35997e7775 2024-12-17T12:38:12,271 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/e513e291c287428eb3b7ee23863d60f6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/e513e291c287428eb3b7ee23863d60f6 2024-12-17T12:38:12,271 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/cb98ccb882e5415cb08d7e8341d0e608 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/cb98ccb882e5415cb08d7e8341d0e608 2024-12-17T12:38:12,272 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/c07fd6ce56df4383b946be742327fdaf to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/c07fd6ce56df4383b946be742327fdaf 2024-12-17T12:38:12,274 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/4dd6bbbaf4f046cdac9e64044a8351b8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/4dd6bbbaf4f046cdac9e64044a8351b8 2024-12-17T12:38:12,275 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/45a8685432074ec387fc4b5298f484ab to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/45a8685432074ec387fc4b5298f484ab 2024-12-17T12:38:12,275 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/dd87bfd65d9a4b9ca80520e824f3bb99 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/dd87bfd65d9a4b9ca80520e824f3bb99 2024-12-17T12:38:12,275 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/a664f261e04643ed8c7aa1038987296e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/a664f261e04643ed8c7aa1038987296e 2024-12-17T12:38:12,276 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/86ab1660606b43ff836a32a0a2e11328 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/86ab1660606b43ff836a32a0a2e11328 2024-12-17T12:38:12,276 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/9a0904e526fd44b0847b4d4cf7ad9c1f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/9a0904e526fd44b0847b4d4cf7ad9c1f 2024-12-17T12:38:12,276 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7ecf3e0f00ac4e15a9df9f2451e8b294 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7ecf3e0f00ac4e15a9df9f2451e8b294 2024-12-17T12:38:12,276 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/4bee003832044e59b9b1f19a8ec7ac40 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/4bee003832044e59b9b1f19a8ec7ac40 2024-12-17T12:38:12,277 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7f0980dae7504d90b776d75c5f6be73d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7f0980dae7504d90b776d75c5f6be73d 2024-12-17T12:38:12,277 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d8595f10a4de42ae9d1df37803224b30 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/d8595f10a4de42ae9d1df37803224b30 2024-12-17T12:38:12,277 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7434737a258042ed90fece52f20413f5 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/7434737a258042ed90fece52f20413f5 2024-12-17T12:38:12,281 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/recovered.edits/486.seqid, newMaxSeqId=486, maxSeqId=1 2024-12-17T12:38:12,283 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7. 
2024-12-17T12:38:12,284 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1635): Region close journal for 502e77060db097ea5decbe44e66ef8e7: 2024-12-17T12:38:12,285 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] handler.UnassignRegionHandler(170): Closed 502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:38:12,285 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=30 updating hbase:meta row=502e77060db097ea5decbe44e66ef8e7, regionState=CLOSED 2024-12-17T12:38:12,288 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-17T12:38:12,288 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseRegionProcedure 502e77060db097ea5decbe44e66ef8e7, server=681c08bfdbdf,36491,1734439058372 in 1.5190 sec 2024-12-17T12:38:12,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=30, resume processing ppid=29 2024-12-17T12:38:12,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, ppid=29, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=502e77060db097ea5decbe44e66ef8e7, UNASSIGN in 1.5230 sec 2024-12-17T12:38:12,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-17T12:38:12,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5290 sec 2024-12-17T12:38:12,292 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439092292"}]},"ts":"1734439092292"} 2024-12-17T12:38:12,293 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-17T12:38:12,334 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-17T12:38:12,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5910 sec 2024-12-17T12:38:12,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-17T12:38:12,858 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-17T12:38:12,864 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-17T12:38:12,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:12,871 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=32, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:12,872 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=32, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:12,872 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-17T12:38:12,875 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:38:12,880 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/recovered.edits] 2024-12-17T12:38:12,883 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/59a86faa8e714815b036420faa2978f2 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/59a86faa8e714815b036420faa2978f2 2024-12-17T12:38:12,883 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/bd82dde3abaf4554ab5288da4874ce51 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/bd82dde3abaf4554ab5288da4874ce51 2024-12-17T12:38:12,883 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/8df4bf64564f4390a1615a4b70ecfa21 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/A/8df4bf64564f4390a1615a4b70ecfa21 2024-12-17T12:38:12,886 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/7f13738a9fbb433680c8b81f7e2dfa24 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/7f13738a9fbb433680c8b81f7e2dfa24 2024-12-17T12:38:12,886 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/eccef23c26ef4e9dbe61a2504456f02d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/eccef23c26ef4e9dbe61a2504456f02d 
2024-12-17T12:38:12,886 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/cc032affc5f745bcba7d866e9ba0faf9 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/B/cc032affc5f745bcba7d866e9ba0faf9 2024-12-17T12:38:12,889 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/e946500b78d14093bd5108c3bacb995f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/e946500b78d14093bd5108c3bacb995f 2024-12-17T12:38:12,889 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/0f1c94412a4041dd8ee937ab733c10e7 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/0f1c94412a4041dd8ee937ab733c10e7 2024-12-17T12:38:12,890 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/45a3d2cc724a4ab5b8e7fcace2cee82d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/C/45a3d2cc724a4ab5b8e7fcace2cee82d 2024-12-17T12:38:12,892 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/recovered.edits/486.seqid to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7/recovered.edits/486.seqid 2024-12-17T12:38:12,893 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/502e77060db097ea5decbe44e66ef8e7 2024-12-17T12:38:12,893 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-17T12:38:12,897 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=32, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:12,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-17T12:38:12,903 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-17T12:38:12,930 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-17T12:38:12,931 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=32, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:12,931 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-17T12:38:12,931 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734439092931"}]},"ts":"9223372036854775807"} 2024-12-17T12:38:12,935 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-17T12:38:12,935 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 502e77060db097ea5decbe44e66ef8e7, NAME => 'TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7.', STARTKEY => '', ENDKEY => ''}] 2024-12-17T12:38:12,935 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-17T12:38:12,935 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734439092935"}]},"ts":"9223372036854775807"} 2024-12-17T12:38:12,937 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-17T12:38:12,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-17T12:38:12,977 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=32, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:12,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 112 msec 2024-12-17T12:38:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-17T12:38:13,177 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-17T12:38:13,193 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=245 (was 219) Potentially hanging thread: hconnection-0x3fe77b6d-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3fe77b6d-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;681c08bfdbdf:36491-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2077175215_22 at /127.0.0.1:39780 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3fe77b6d-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3fe77b6d-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=453 (was 442) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=240 (was 180) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3911 (was 4457) 2024-12-17T12:38:13,205 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=245, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=240, ProcessCount=11, AvailableMemoryMB=3910 2024-12-17T12:38:13,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-17T12:38:13,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-17T12:38:13,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=33, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:13,210 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-17T12:38:13,210 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:13,210 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 33 2024-12-17T12:38:13,211 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-17T12:38:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-17T12:38:13,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741944_1120 (size=963) 2024-12-17T12:38:13,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-17T12:38:13,515 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-17T12:38:13,626 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9 2024-12-17T12:38:13,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741945_1121 (size=53) 2024-12-17T12:38:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-17T12:38:14,034 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:38:14,034 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 1e405373380390a8eca5f807f91814d6, disabling compactions & flushes 2024-12-17T12:38:14,034 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:14,034 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:14,035 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. after waiting 0 ms 2024-12-17T12:38:14,035 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:14,035 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
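
For orientation, a minimal client-side sketch of how a descriptor like the one in the create-table request above can be assembled with the HBase 2.x Admin API. This is not part of the test run; the class name and connection setup are illustrative, and only the attributes visible in the log (families A/B/C, VERSIONS=1, the ADAPTIVE compacting-memstore attribute, the deliberately tiny 128 KB flush size) are reproduced.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // table-level metadata attribute seen in the log entry above
                  .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                  // 131072 bytes, the small MEMSTORE_FLUSHSIZE that triggers the
                  // TableDescriptorChecker warning about very frequent flushing
                  .setMemStoreFlushSize(128 * 1024L);
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)                       // VERSIONS => '1'
                    .build());
          }
          admin.createTable(table.build());                  // stores a CreateTableProcedure, as logged
        }
      }
    }
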
2024-12-17T12:38:14,035 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:14,037 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-17T12:38:14,038 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734439094037"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734439094037"}]},"ts":"1734439094037"} 2024-12-17T12:38:14,041 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-17T12:38:14,042 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-17T12:38:14,043 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439094042"}]},"ts":"1734439094042"} 2024-12-17T12:38:14,044 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-17T12:38:14,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1e405373380390a8eca5f807f91814d6, ASSIGN}] 2024-12-17T12:38:14,094 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1e405373380390a8eca5f807f91814d6, ASSIGN 2024-12-17T12:38:14,095 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=1e405373380390a8eca5f807f91814d6, ASSIGN; state=OFFLINE, location=681c08bfdbdf,36491,1734439058372; forceNewPlan=false, retain=false 2024-12-17T12:38:14,246 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=1e405373380390a8eca5f807f91814d6, regionState=OPENING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:14,250 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; OpenRegionProcedure 1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:38:14,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-17T12:38:14,404 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:14,410 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:14,411 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(7285): Opening region: {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:38:14,411 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:14,411 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:38:14,412 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(7327): checking encryption for 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:14,412 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(7330): checking classloading for 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:14,414 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:14,416 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:38:14,417 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e405373380390a8eca5f807f91814d6 columnFamilyName A 2024-12-17T12:38:14,417 DEBUG [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:14,418 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(327): Store=1e405373380390a8eca5f807f91814d6/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:38:14,418 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:14,419 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:38:14,420 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e405373380390a8eca5f807f91814d6 columnFamilyName B 2024-12-17T12:38:14,420 DEBUG [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:14,420 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(327): Store=1e405373380390a8eca5f807f91814d6/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:38:14,420 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:14,421 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:38:14,422 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e405373380390a8eca5f807f91814d6 columnFamilyName C 2024-12-17T12:38:14,422 DEBUG [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:14,422 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(327): Store=1e405373380390a8eca5f807f91814d6/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:38:14,422 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:14,423 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:14,423 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:14,424 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-17T12:38:14,425 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1085): writing seq id for 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:14,427 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-17T12:38:14,428 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1102): Opened 1e405373380390a8eca5f807f91814d6; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67286180, jitterRate=0.002642214298248291}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-17T12:38:14,428 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1001): Region open journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:14,429 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., pid=35, masterSystemTime=1734439094404 2024-12-17T12:38:14,430 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:14,430 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
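
The store-opener entries above show each family coming up with a CompactingMemStore and compactor=ADAPTIVE, driven here by the table-level metadata attribute. As a sketch only, the same behaviour can also be requested per column family through the typed builder API; the helper below is illustrative and not taken from this test.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveFamilySketch {
      // Builds a family descriptor that asks for the ADAPTIVE in-memory compaction
      // policy, i.e. the "compactor=ADAPTIVE" CompactingMemStore logged when the
      // store opens.
      static ColumnFamilyDescriptor adaptiveFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setMaxVersions(1)
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
      }
    }
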
2024-12-17T12:38:14,430 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=1e405373380390a8eca5f807f91814d6, regionState=OPEN, openSeqNum=2, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:14,432 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-17T12:38:14,432 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; OpenRegionProcedure 1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 in 181 msec 2024-12-17T12:38:14,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-12-17T12:38:14,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1e405373380390a8eca5f807f91814d6, ASSIGN in 340 msec 2024-12-17T12:38:14,434 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-17T12:38:14,434 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439094434"}]},"ts":"1734439094434"} 2024-12-17T12:38:14,435 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-17T12:38:14,476 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-17T12:38:14,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2680 sec 2024-12-17T12:38:14,761 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-17T12:38:14,765 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32880, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-17T12:38:15,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-17T12:38:15,321 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 33 completed 2024-12-17T12:38:15,324 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x083eb3a5 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50b6382b 2024-12-17T12:38:15,343 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23bc83b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:15,345 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:15,346 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:42302, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:15,348 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-17T12:38:15,349 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32882, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-17T12:38:15,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-17T12:38:15,355 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-17T12:38:15,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:15,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741946_1122 (size=999) 
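
The modify-table request above switches column family 'A' to MOB storage with a 4-byte threshold (IS_MOB => 'true', MOB_THRESHOLD => '4'). A minimal sketch of issuing an equivalent change through the Admin API follows; it is illustrative only, with connection setup and class name assumed, and it mirrors the ModifyTableProcedure and region reopen recorded in the next entries.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor current = admin.getDescriptor(name);
          TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(
                  ColumnFamilyDescriptorBuilder
                      .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                      .setMobEnabled(true)     // IS_MOB => 'true'
                      .setMobThreshold(4L)     // MOB_THRESHOLD => '4' bytes
                      .build())
              .build();
          // Triggers a ModifyTableProcedure and a reopen of the table's regions,
          // as seen in the subsequent log entries.
          admin.modifyTable(modified);
        }
      }
    }
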
2024-12-17T12:38:15,773 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-17T12:38:15,773 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-17T12:38:15,777 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-17T12:38:15,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1e405373380390a8eca5f807f91814d6, REOPEN/MOVE}] 2024-12-17T12:38:15,788 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1e405373380390a8eca5f807f91814d6, REOPEN/MOVE 2024-12-17T12:38:15,788 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=1e405373380390a8eca5f807f91814d6, regionState=CLOSING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:15,789 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-17T12:38:15,790 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; CloseRegionProcedure 1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:38:15,941 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:15,943 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(124): Close 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:15,943 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-17T12:38:15,943 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1681): Closing 1e405373380390a8eca5f807f91814d6, disabling compactions & flushes 2024-12-17T12:38:15,944 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:15,944 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:15,944 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
after waiting 0 ms 2024-12-17T12:38:15,944 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:15,952 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-17T12:38:15,952 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:15,953 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1635): Region close journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:15,953 WARN [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionServer(3786): Not adding moved region record: 1e405373380390a8eca5f807f91814d6 to self. 2024-12-17T12:38:15,954 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(170): Closed 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:15,955 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=1e405373380390a8eca5f807f91814d6, regionState=CLOSED 2024-12-17T12:38:15,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-17T12:38:15,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseRegionProcedure 1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 in 166 msec 2024-12-17T12:38:15,958 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=1e405373380390a8eca5f807f91814d6, REOPEN/MOVE; state=CLOSED, location=681c08bfdbdf,36491,1734439058372; forceNewPlan=false, retain=true 2024-12-17T12:38:16,109 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=1e405373380390a8eca5f807f91814d6, regionState=OPENING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=38, state=RUNNABLE; OpenRegionProcedure 1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:38:16,267 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,270 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:16,270 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7285): Opening region: {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:38:16,270 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:16,270 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:38:16,271 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7327): checking encryption for 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:16,271 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7330): checking classloading for 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:16,274 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:16,275 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:38:16,279 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e405373380390a8eca5f807f91814d6 columnFamilyName A 2024-12-17T12:38:16,281 DEBUG [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:16,282 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(327): Store=1e405373380390a8eca5f807f91814d6/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:38:16,282 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:16,283 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:38:16,283 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e405373380390a8eca5f807f91814d6 columnFamilyName B 2024-12-17T12:38:16,283 DEBUG [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:16,283 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(327): Store=1e405373380390a8eca5f807f91814d6/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:38:16,283 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:16,284 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:38:16,284 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e405373380390a8eca5f807f91814d6 columnFamilyName C 2024-12-17T12:38:16,284 DEBUG [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:16,284 INFO [StoreOpener-1e405373380390a8eca5f807f91814d6-1 {}] regionserver.HStore(327): Store=1e405373380390a8eca5f807f91814d6/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:38:16,285 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:16,285 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:16,286 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:16,287 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-17T12:38:16,288 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1085): writing seq id for 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:16,289 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1102): Opened 1e405373380390a8eca5f807f91814d6; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63940605, jitterRate=-0.04721073806285858}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-17T12:38:16,291 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1001): Region open journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:16,291 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., pid=40, masterSystemTime=1734439096267 2024-12-17T12:38:16,292 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:16,293 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:16,293 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=1e405373380390a8eca5f807f91814d6, regionState=OPEN, openSeqNum=5, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,295 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=38 2024-12-17T12:38:16,295 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; OpenRegionProcedure 1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 in 182 msec 2024-12-17T12:38:16,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-12-17T12:38:16,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1e405373380390a8eca5f807f91814d6, REOPEN/MOVE in 508 msec 2024-12-17T12:38:16,299 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-17T12:38:16,299 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 521 msec 2024-12-17T12:38:16,301 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 943 msec 2024-12-17T12:38:16,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-17T12:38:16,308 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-12-17T12:38:16,370 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5400112e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:16,373 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x048068a5 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a8f4734 2024-12-17T12:38:16,385 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38766d64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:16,388 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10c964e8 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9ed28bb 2024-12-17T12:38:16,402 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b5cad1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:16,406 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c3b736e to 
127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@70267494 2024-12-17T12:38:16,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bc462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:16,419 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-12-17T12:38:16,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:16,427 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f6b07e3 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@595e9ebe 2024-12-17T12:38:16,434 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a0471b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:16,436 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3bb8b26c to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@163e60cb 2024-12-17T12:38:16,443 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fbe420c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:16,445 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32239a70 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@577e8cc7 2024-12-17T12:38:16,451 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3abd1a06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:16,453 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6b3821ad to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@480380b9 2024-12-17T12:38:16,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a3cce95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:16,473 DEBUG 
[hconnection-0x3a83afe5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:16,474 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:16,474 DEBUG [hconnection-0x27a3616c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:16,474 DEBUG [hconnection-0x266405ab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:16,474 DEBUG [hconnection-0x160afe77-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:16,475 DEBUG [hconnection-0x2696aa23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees 2024-12-17T12:38:16,476 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42318, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:16,476 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42346, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:16,476 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:16,477 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42320, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:16,477 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42360, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:16,477 DEBUG [hconnection-0x12e2ac45-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:16,478 DEBUG [hconnection-0x42b4d2a0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:16,478 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42368, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:16,479 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:16,479 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42374, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:16,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-17T12:38:16,480 INFO [PEWorker-2 {}] 
procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:16,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:16,480 DEBUG [hconnection-0x3795ec0e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:16,482 DEBUG [hconnection-0x8e94b12-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:16,482 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42376, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:16,483 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42390, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:16,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:16,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-17T12:38:16,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:16,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:16,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:16,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:16,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:16,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:16,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439156524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439156526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439156528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439156531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412179ef86698222a46f29de02e78b910aeff_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439096488/Put/seqid=0 2024-12-17T12:38:16,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439156559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741947_1123 (size=12154) 2024-12-17T12:38:16,569 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:16,575 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412179ef86698222a46f29de02e78b910aeff_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412179ef86698222a46f29de02e78b910aeff_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:16,577 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/f1fa03fe13a14147aca52959d8b3c390, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:16,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-17T12:38:16,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/f1fa03fe13a14147aca52959d8b3c390 is 175, key is test_row_0/A:col10/1734439096488/Put/seqid=0 2024-12-17T12:38:16,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741948_1124 (size=30955) 2024-12-17T12:38:16,632 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:16,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting 
region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:16,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:16,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:16,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:16,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:16,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:16,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439156658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439156662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439156663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439156664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439156664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-17T12:38:16,785 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:16,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:16,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:16,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:16,787 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:16,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:16,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:16,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439156859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439156866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439156866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:16,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439156868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439156868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,939 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:16,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:16,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:16,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:16,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:16,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:16,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:16,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:16,998 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/f1fa03fe13a14147aca52959d8b3c390 2024-12-17T12:38:17,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/13e88821aaea4729a1411225ae6eaa62 is 50, key is test_row_0/B:col10/1734439096488/Put/seqid=0 2024-12-17T12:38:17,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741949_1125 (size=12001) 2024-12-17T12:38:17,061 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/13e88821aaea4729a1411225ae6eaa62 2024-12-17T12:38:17,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-17T12:38:17,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/3e76a0c002c84b71b3151ebbf8c2f7d4 is 50, key is test_row_0/C:col10/1734439096488/Put/seqid=0 2024-12-17T12:38:17,092 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:17,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741950_1126 (size=12001) 2024-12-17T12:38:17,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:17,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/3e76a0c002c84b71b3151ebbf8c2f7d4 2024-12-17T12:38:17,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/f1fa03fe13a14147aca52959d8b3c390 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f1fa03fe13a14147aca52959d8b3c390 2024-12-17T12:38:17,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f1fa03fe13a14147aca52959d8b3c390, entries=150, sequenceid=16, filesize=30.2 K 2024-12-17T12:38:17,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/13e88821aaea4729a1411225ae6eaa62 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/13e88821aaea4729a1411225ae6eaa62 2024-12-17T12:38:17,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/13e88821aaea4729a1411225ae6eaa62, entries=150, sequenceid=16, 
filesize=11.7 K 2024-12-17T12:38:17,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/3e76a0c002c84b71b3151ebbf8c2f7d4 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/3e76a0c002c84b71b3151ebbf8c2f7d4 2024-12-17T12:38:17,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/3e76a0c002c84b71b3151ebbf8c2f7d4, entries=150, sequenceid=16, filesize=11.7 K 2024-12-17T12:38:17,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1e405373380390a8eca5f807f91814d6 in 638ms, sequenceid=16, compaction requested=false 2024-12-17T12:38:17,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:17,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:17,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-17T12:38:17,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:17,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:17,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:17,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:17,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:17,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:17,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217e724601f1c2d4b88bdd8ca638c5c0a8d_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439096526/Put/seqid=0 2024-12-17T12:38:17,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439157183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439157184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741951_1127 (size=14594) 2024-12-17T12:38:17,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439157188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,199 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:17,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439157190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439157191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,205 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217e724601f1c2d4b88bdd8ca638c5c0a8d_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e724601f1c2d4b88bdd8ca638c5c0a8d_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:17,207 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/089f0a3278734bb8b25017fbd58b0b6b, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:17,208 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/089f0a3278734bb8b25017fbd58b0b6b is 175, key is test_row_0/A:col10/1734439096526/Put/seqid=0 2024-12-17T12:38:17,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to 
blk_1073741952_1128 (size=39549) 2024-12-17T12:38:17,227 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/089f0a3278734bb8b25017fbd58b0b6b 2024-12-17T12:38:17,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/96e167275edd49c99fe4e942f0c6f7f8 is 50, key is test_row_0/B:col10/1734439096526/Put/seqid=0 2024-12-17T12:38:17,246 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:17,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:17,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
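The DefaultMobStoreFlusher and HMobStore entries above indicate that column family A of TestAcidGuarantees is MOB-enabled: its flushed values are written to a file under /mobdir and then renamed into place, while families B and C go through the plain DefaultStoreFlusher. A minimal sketch, using the public client API, of how such a family could be declared; the 100-byte MOB threshold below is an illustrative assumption, not a value taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          // Family A: MOB-enabled, so values above the threshold are flushed into
          // files under /mobdir and the regular store file only keeps references.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100)   // bytes; illustrative value, not from this log
              .build());
          // Families B and C: ordinary store files, flushed by DefaultStoreFlusher.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
          admin.createTable(table.build());
        }
      }
    }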
2024-12-17T12:38:17,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741953_1129 (size=12001) 2024-12-17T12:38:17,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/96e167275edd49c99fe4e942f0c6f7f8 2024-12-17T12:38:17,284 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/33c8580942c845288485f1dfbb5726cd is 50, key is test_row_0/C:col10/1734439096526/Put/seqid=0 2024-12-17T12:38:17,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439157293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439157294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439157298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439157305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439157307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741954_1130 (size=12001) 2024-12-17T12:38:17,401 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:17,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:17,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
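The repeated RegionTooBusyException rejections above ("Over memstore limit=512.0 K") come from HRegion.checkResources: once a region's memstore grows past the flush size times the blocking multiplier, further mutations are refused until the in-flight flush drains it. A small sketch of how that blocking limit is derived from configuration; the 512 K figure in this log implies the test runs with a much smaller flush size than the 128 MB default, but the exact test setting is not visible here, so the defaults below are only illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush threshold (default 128 MB) and blocking multiplier (default 4).
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Mutations are rejected with RegionTooBusyException once the region's
        // memstore exceeds this product, until the running flush frees space.
        long blockingLimit = flushSize * multiplier;
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
      }
    }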
2024-12-17T12:38:17,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439157495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439157500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439157504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439157509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439157513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,554 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,554 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:17,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:17,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,555 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
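From the client's side, each of the Mutate calls rejected above can simply be retried once the flush completes, which is why the CallRunner lines keep moving the deadline forward. A hedged sketch of the retry-with-backoff pattern a caller might use if the exception surfaces directly (depending on client retry settings it may instead arrive wrapped in a RetriesExhaustedException); the table, row, and column names echo the test data, but the retry policy itself is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break;                 // write accepted
            } catch (RegionTooBusyException e) {
              // The region's memstore is over its blocking limit and a flush is
              // already running, so back off and retry instead of failing at once.
              if (attempt == 5) throw e;
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }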
2024-12-17T12:38:17,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-17T12:38:17,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:17,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:17,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:17,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/33c8580942c845288485f1dfbb5726cd 2024-12-17T12:38:17,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/089f0a3278734bb8b25017fbd58b0b6b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/089f0a3278734bb8b25017fbd58b0b6b 2024-12-17T12:38:17,732 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/089f0a3278734bb8b25017fbd58b0b6b, entries=200, sequenceid=41, filesize=38.6 K 2024-12-17T12:38:17,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/96e167275edd49c99fe4e942f0c6f7f8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/96e167275edd49c99fe4e942f0c6f7f8 2024-12-17T12:38:17,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/96e167275edd49c99fe4e942f0c6f7f8, entries=150, sequenceid=41, filesize=11.7 K 2024-12-17T12:38:17,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/33c8580942c845288485f1dfbb5726cd as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/33c8580942c845288485f1dfbb5726cd 2024-12-17T12:38:17,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/33c8580942c845288485f1dfbb5726cd, entries=150, sequenceid=41, filesize=11.7 K 2024-12-17T12:38:17,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 1e405373380390a8eca5f807f91814d6 in 582ms, sequenceid=41, compaction requested=false 2024-12-17T12:38:17,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:17,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:17,800 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-17T12:38:17,800 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:17,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:17,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:17,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:17,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:17,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:17,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412177b88e5e8b879473c80ed1afc48aaa9d9_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439097189/Put/seqid=0 2024-12-17T12:38:17,818 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-17T12:38:17,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439157829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439157827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439157830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439157832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439157833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741955_1131 (size=12154) 2024-12-17T12:38:17,859 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:17,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:17,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:17,860 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:17,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439157934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439157934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439157934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439157937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:17,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:17,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439157938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,011 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:18,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:18,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:18,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439158138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439158139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439158140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439158140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439158143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,165 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:18,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:18,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,166 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:18,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,249 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:18,254 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412177b88e5e8b879473c80ed1afc48aaa9d9_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412177b88e5e8b879473c80ed1afc48aaa9d9_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:18,255 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/cb10450ef86f4e70ab0c5cf616f3ba20, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:18,256 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/cb10450ef86f4e70ab0c5cf616f3ba20 is 175, key is test_row_0/A:col10/1734439097189/Put/seqid=0 2024-12-17T12:38:18,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741956_1132 (size=30955) 2024-12-17T12:38:18,267 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/cb10450ef86f4e70ab0c5cf616f3ba20 2024-12-17T12:38:18,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/6b1e93bc0f514a1dbd1fe3ca1c977d43 is 50, key is test_row_0/B:col10/1734439097189/Put/seqid=0 2024-12-17T12:38:18,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741957_1133 (size=12001) 2024-12-17T12:38:18,319 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:18,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:18,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:18,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,320 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:18,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439158443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439158444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439158444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439158445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439158447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,471 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:18,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:18,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-17T12:38:18,625 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:18,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:18,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:18,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/6b1e93bc0f514a1dbd1fe3ca1c977d43 2024-12-17T12:38:18,695 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/974b0873b5c843fdb81b283228306b4b is 50, key is test_row_0/C:col10/1734439097189/Put/seqid=0 2024-12-17T12:38:18,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741958_1134 (size=12001) 2024-12-17T12:38:18,779 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:18,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:18,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,780 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:18,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,932 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:18,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:18,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:18,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:18,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439158946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439158947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439158948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439158947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:18,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439158953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:19,085 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:19,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:19,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:19,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:19,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:19,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:19,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:19,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:19,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/974b0873b5c843fdb81b283228306b4b 2024-12-17T12:38:19,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/cb10450ef86f4e70ab0c5cf616f3ba20 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/cb10450ef86f4e70ab0c5cf616f3ba20 2024-12-17T12:38:19,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/cb10450ef86f4e70ab0c5cf616f3ba20, entries=150, sequenceid=54, filesize=30.2 K 2024-12-17T12:38:19,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/6b1e93bc0f514a1dbd1fe3ca1c977d43 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6b1e93bc0f514a1dbd1fe3ca1c977d43 2024-12-17T12:38:19,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6b1e93bc0f514a1dbd1fe3ca1c977d43, entries=150, sequenceid=54, 
filesize=11.7 K 2024-12-17T12:38:19,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/974b0873b5c843fdb81b283228306b4b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/974b0873b5c843fdb81b283228306b4b 2024-12-17T12:38:19,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/974b0873b5c843fdb81b283228306b4b, entries=150, sequenceid=54, filesize=11.7 K 2024-12-17T12:38:19,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 1e405373380390a8eca5f807f91814d6 in 1332ms, sequenceid=54, compaction requested=true 2024-12-17T12:38:19,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:19,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:19,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:19,132 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:19,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:19,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:19,132 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:19,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:19,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:19,133 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:19,133 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:19,133 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 
1e405373380390a8eca5f807f91814d6/B is initiating minor compaction (all files) 2024-12-17T12:38:19,133 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/A is initiating minor compaction (all files) 2024-12-17T12:38:19,133 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/A in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:19,133 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/B in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:19,133 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f1fa03fe13a14147aca52959d8b3c390, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/089f0a3278734bb8b25017fbd58b0b6b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/cb10450ef86f4e70ab0c5cf616f3ba20] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=99.1 K 2024-12-17T12:38:19,133 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/13e88821aaea4729a1411225ae6eaa62, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/96e167275edd49c99fe4e942f0c6f7f8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6b1e93bc0f514a1dbd1fe3ca1c977d43] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=35.2 K 2024-12-17T12:38:19,134 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:19,134 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f1fa03fe13a14147aca52959d8b3c390, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/089f0a3278734bb8b25017fbd58b0b6b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/cb10450ef86f4e70ab0c5cf616f3ba20] 2024-12-17T12:38:19,134 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 13e88821aaea4729a1411225ae6eaa62, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734439096488 2024-12-17T12:38:19,134 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1fa03fe13a14147aca52959d8b3c390, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734439096488 2024-12-17T12:38:19,134 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 96e167275edd49c99fe4e942f0c6f7f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734439096508 2024-12-17T12:38:19,134 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 089f0a3278734bb8b25017fbd58b0b6b, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734439096508 2024-12-17T12:38:19,134 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb10450ef86f4e70ab0c5cf616f3ba20, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734439097179 2024-12-17T12:38:19,134 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b1e93bc0f514a1dbd1fe3ca1c977d43, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734439097179 2024-12-17T12:38:19,145 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:19,146 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#B#compaction#114 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:19,147 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/9287e37cecbc438aa92043a1cdc63c06 is 50, key is test_row_0/B:col10/1734439097189/Put/seqid=0 2024-12-17T12:38:19,150 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412173ac87227e6064081ab120ee078a163ec_1e405373380390a8eca5f807f91814d6 store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:19,155 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412173ac87227e6064081ab120ee078a163ec_1e405373380390a8eca5f807f91814d6, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:19,156 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412173ac87227e6064081ab120ee078a163ec_1e405373380390a8eca5f807f91814d6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:19,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741959_1135 (size=12104) 2024-12-17T12:38:19,170 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/9287e37cecbc438aa92043a1cdc63c06 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9287e37cecbc438aa92043a1cdc63c06 2024-12-17T12:38:19,183 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/B of 1e405373380390a8eca5f807f91814d6 into 9287e37cecbc438aa92043a1cdc63c06(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:19,183 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:19,183 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/B, priority=13, startTime=1734439099132; duration=0sec 2024-12-17T12:38:19,183 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:19,183 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:B 2024-12-17T12:38:19,183 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:19,184 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:19,184 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/C is initiating minor compaction (all files) 2024-12-17T12:38:19,184 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/C in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:19,184 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/3e76a0c002c84b71b3151ebbf8c2f7d4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/33c8580942c845288485f1dfbb5726cd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/974b0873b5c843fdb81b283228306b4b] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=35.2 K 2024-12-17T12:38:19,185 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e76a0c002c84b71b3151ebbf8c2f7d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734439096488 2024-12-17T12:38:19,185 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 33c8580942c845288485f1dfbb5726cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734439096508 2024-12-17T12:38:19,185 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 974b0873b5c843fdb81b283228306b4b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734439097179 2024-12-17T12:38:19,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is 
added to blk_1073741960_1136 (size=4469) 2024-12-17T12:38:19,191 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#A#compaction#115 average throughput is 0.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:19,192 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/07220ee5658241de91d754e6a1572f38 is 175, key is test_row_0/A:col10/1734439097189/Put/seqid=0 2024-12-17T12:38:19,198 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#C#compaction#116 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:19,199 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/f35cbaa06bd448e18e7d5dd03bd4f7a2 is 50, key is test_row_0/C:col10/1734439097189/Put/seqid=0 2024-12-17T12:38:19,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741961_1137 (size=31058) 2024-12-17T12:38:19,237 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:19,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-17T12:38:19,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:19,238 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:38:19,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:19,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:19,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:19,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:19,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:19,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:19,241 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/07220ee5658241de91d754e6a1572f38 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/07220ee5658241de91d754e6a1572f38 2024-12-17T12:38:19,249 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/A of 1e405373380390a8eca5f807f91814d6 into 07220ee5658241de91d754e6a1572f38(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:19,250 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:19,250 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/A, priority=13, startTime=1734439099132; duration=0sec 2024-12-17T12:38:19,250 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:19,250 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:A 2024-12-17T12:38:19,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741962_1138 (size=12104) 2024-12-17T12:38:19,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217f06d1943bffb432ba45e0f9d68317679_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439097828/Put/seqid=0 2024-12-17T12:38:19,262 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/f35cbaa06bd448e18e7d5dd03bd4f7a2 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f35cbaa06bd448e18e7d5dd03bd4f7a2 2024-12-17T12:38:19,268 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/C of 1e405373380390a8eca5f807f91814d6 into f35cbaa06bd448e18e7d5dd03bd4f7a2(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:19,268 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:19,268 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/C, priority=13, startTime=1734439099132; duration=0sec 2024-12-17T12:38:19,269 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:19,269 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:C 2024-12-17T12:38:19,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741963_1139 (size=12154) 2024-12-17T12:38:19,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,700 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217f06d1943bffb432ba45e0f9d68317679_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f06d1943bffb432ba45e0f9d68317679_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:19,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/d20bc21b2a7c47dfaa9ca5a50ef2fc31, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:19,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/d20bc21b2a7c47dfaa9ca5a50ef2fc31 is 175, key is test_row_0/A:col10/1734439097828/Put/seqid=0 2024-12-17T12:38:19,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741964_1140 (size=30955) 2024-12-17T12:38:19,708 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/d20bc21b2a7c47dfaa9ca5a50ef2fc31 2024-12-17T12:38:19,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 
{event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/e5387c23bd174c5597466649ebb0693e is 50, key is test_row_0/B:col10/1734439097828/Put/seqid=0 2024-12-17T12:38:19,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741965_1141 (size=12001) 2024-12-17T12:38:19,722 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/e5387c23bd174c5597466649ebb0693e 2024-12-17T12:38:19,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/7f47bedaab014415801096fea2c7457d is 50, key is test_row_0/C:col10/1734439097828/Put/seqid=0 2024-12-17T12:38:19,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741966_1142 (size=12001) 2024-12-17T12:38:19,766 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/7f47bedaab014415801096fea2c7457d 2024-12-17T12:38:19,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/d20bc21b2a7c47dfaa9ca5a50ef2fc31 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d20bc21b2a7c47dfaa9ca5a50ef2fc31 2024-12-17T12:38:19,781 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d20bc21b2a7c47dfaa9ca5a50ef2fc31, entries=150, sequenceid=78, filesize=30.2 K 2024-12-17T12:38:19,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/e5387c23bd174c5597466649ebb0693e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e5387c23bd174c5597466649ebb0693e 2024-12-17T12:38:19,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,788 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e5387c23bd174c5597466649ebb0693e, entries=150, sequenceid=78, filesize=11.7 K 2024-12-17T12:38:19,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/7f47bedaab014415801096fea2c7457d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/7f47bedaab014415801096fea2c7457d 2024-12-17T12:38:19,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,795 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/7f47bedaab014415801096fea2c7457d, entries=150, sequenceid=78, filesize=11.7 K 2024-12-17T12:38:19,796 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 1e405373380390a8eca5f807f91814d6 in 558ms, 
sequenceid=78, compaction requested=false 2024-12-17T12:38:19,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:19,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:19,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-12-17T12:38:19,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=42 2024-12-17T12:38:19,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-17T12:38:19,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3180 sec 2024-12-17T12:38:19,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,802 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,803 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees in 3.3270 sec 2024-12-17T12:38:19,843 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,849 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,854 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,859 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,863 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,867 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,871 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,875 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,879 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,882 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,887 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,892 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,896 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,900 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,902 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,905 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,908 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,911 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,914 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,920 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,924 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,926 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,931 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,935 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,937 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,940 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,943 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,946 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,950 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,953 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:19,957 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same storefiletracker.StoreFileTrackerFactory(122) DEBUG entry is repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 36491, several times per millisecond, until 2024-12-17T12:38:20,048 ...]
2024-12-17T12:38:20,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6
2024-12-17T12:38:20,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-17T12:38:20,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A
2024-12-17T12:38:20,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-17T12:38:20,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B
2024-12-17T12:38:20,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-17T12:38:20,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C
2024-12-17T12:38:20,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-17T12:38:20,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412175767eb46f9d7439184904aa73712ac53_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439099980/Put/seqid=0
2024-12-17T12:38:20,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741968_1144 (size=24358)
2024-12-17T12:38:20,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:20,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:20,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439160066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:20,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439160065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:20,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:20,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439160068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:20,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439160074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439160074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439160176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439160176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439160177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439160188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439160188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439160380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439160381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439160381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439160395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439160396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,454 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,464 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412175767eb46f9d7439184904aa73712ac53_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412175767eb46f9d7439184904aa73712ac53_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:20,465 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/02fac8aaaf7a4789af0987b31d23bf8e, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:20,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/02fac8aaaf7a4789af0987b31d23bf8e is 175, key is test_row_0/A:col10/1734439099980/Put/seqid=0 2024-12-17T12:38:20,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741967_1143 (size=73994) 2024-12-17T12:38:20,469 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/02fac8aaaf7a4789af0987b31d23bf8e 2024-12-17T12:38:20,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/9d608a1460634578a1d6e6b18313430f is 50, key is test_row_0/B:col10/1734439099980/Put/seqid=0 2024-12-17T12:38:20,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741969_1145 
(size=12001) 2024-12-17T12:38:20,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/9d608a1460634578a1d6e6b18313430f 2024-12-17T12:38:20,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/faaa21e695b4415088259581a80445df is 50, key is test_row_0/C:col10/1734439099980/Put/seqid=0 2024-12-17T12:38:20,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741970_1146 (size=12001) 2024-12-17T12:38:20,519 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/faaa21e695b4415088259581a80445df 2024-12-17T12:38:20,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/02fac8aaaf7a4789af0987b31d23bf8e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/02fac8aaaf7a4789af0987b31d23bf8e 2024-12-17T12:38:20,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/02fac8aaaf7a4789af0987b31d23bf8e, entries=400, sequenceid=91, filesize=72.3 K 2024-12-17T12:38:20,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/9d608a1460634578a1d6e6b18313430f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d608a1460634578a1d6e6b18313430f 2024-12-17T12:38:20,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d608a1460634578a1d6e6b18313430f, entries=150, sequenceid=91, filesize=11.7 K 2024-12-17T12:38:20,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/faaa21e695b4415088259581a80445df as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/faaa21e695b4415088259581a80445df 2024-12-17T12:38:20,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/faaa21e695b4415088259581a80445df, entries=150, sequenceid=91, filesize=11.7 K 2024-12-17T12:38:20,555 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 1e405373380390a8eca5f807f91814d6 in 533ms, sequenceid=91, compaction requested=true 2024-12-17T12:38:20,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:20,556 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:20,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:20,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:20,557 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:20,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:20,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:20,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:20,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:20,558 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136007 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:20,558 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:20,558 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/B is initiating minor compaction (all files) 2024-12-17T12:38:20,558 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/A is initiating minor compaction (all files) 2024-12-17T12:38:20,558 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/B in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
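Note: the repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting mutations once the region's memstore passes its blocking limit, which HBase derives as the per-region flush size multiplied by the block multiplier. The following is a minimal sketch of that relationship only; the property names and the flushSize * multiplier rule are standard HBase configuration, but the concrete values are hypothetical, chosen solely to reproduce the 512.0 K limit reported in this log (the settings actually used by this test are not shown in the excerpt).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values, chosen only to match the 512.0 K blocking limit seen in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // per-region flush trigger
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // blocking multiplier

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

        // HRegion.checkResources throws RegionTooBusyException once the region's memstore size
        // exceeds flushSize * multiplier (here 128 KB * 4 = 512 KB). Writes are rejected until
        // MemStoreFlusher drains the region back below the limit.
        long blockingLimit = flushSize * multiplier;
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
    }
}

This also explains the shape of the log: each rejected Mutate call is logged by CallRunner with the exception, and the rejections stop once the flush recorded above ("Finished flush of dataSize ~53.67 KB ... in 533ms") brings the memstore back under the limit.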
2024-12-17T12:38:20,558 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/A in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:20,558 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/07220ee5658241de91d754e6a1572f38, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d20bc21b2a7c47dfaa9ca5a50ef2fc31, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/02fac8aaaf7a4789af0987b31d23bf8e] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=132.8 K 2024-12-17T12:38:20,558 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9287e37cecbc438aa92043a1cdc63c06, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e5387c23bd174c5597466649ebb0693e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d608a1460634578a1d6e6b18313430f] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=35.3 K 2024-12-17T12:38:20,558 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:20,558 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/07220ee5658241de91d754e6a1572f38, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d20bc21b2a7c47dfaa9ca5a50ef2fc31, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/02fac8aaaf7a4789af0987b31d23bf8e] 2024-12-17T12:38:20,559 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 9287e37cecbc438aa92043a1cdc63c06, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734439097179 2024-12-17T12:38:20,559 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07220ee5658241de91d754e6a1572f38, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734439097179 2024-12-17T12:38:20,559 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting e5387c23bd174c5597466649ebb0693e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734439097827 2024-12-17T12:38:20,559 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting d20bc21b2a7c47dfaa9ca5a50ef2fc31, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734439097827 2024-12-17T12:38:20,560 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d608a1460634578a1d6e6b18313430f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734439099980 2024-12-17T12:38:20,560 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02fac8aaaf7a4789af0987b31d23bf8e, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734439099980 2024-12-17T12:38:20,573 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#B#compaction#123 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:20,573 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/34c848b094e842388982fe8e38a61c0b is 50, key is test_row_0/B:col10/1734439099980/Put/seqid=0 2024-12-17T12:38:20,575 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:20,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-17T12:38:20,584 INFO [Thread-622 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-12-17T12:38:20,585 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:20,587 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412175d48719551094367b57fa4bdf754407c_1e405373380390a8eca5f807f91814d6 store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:20,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-17T12:38:20,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-17T12:38:20,588 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:20,588 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:20,589 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:20,597 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412175d48719551094367b57fa4bdf754407c_1e405373380390a8eca5f807f91814d6, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:20,598 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412175d48719551094367b57fa4bdf754407c_1e405373380390a8eca5f807f91814d6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A 
region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:20,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741971_1147 (size=12207) 2024-12-17T12:38:20,608 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/34c848b094e842388982fe8e38a61c0b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/34c848b094e842388982fe8e38a61c0b 2024-12-17T12:38:20,615 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/B of 1e405373380390a8eca5f807f91814d6 into 34c848b094e842388982fe8e38a61c0b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:20,615 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:20,615 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/B, priority=13, startTime=1734439100557; duration=0sec 2024-12-17T12:38:20,615 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:20,615 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:B 2024-12-17T12:38:20,615 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:20,617 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:20,617 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/C is initiating minor compaction (all files) 2024-12-17T12:38:20,617 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/C in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
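Note: the FlushTableProcedure entries above (pid=41, pid=43) are driven by the test client asking the master to flush TestAcidGuarantees; the region server then flushes each store and the compaction policy picks up the resulting files. The sketch below shows, in outline, how such a flush and a follow-up compaction request are issued through the Admin API. It assumes a reachable cluster and reuses the table name from this test, but it is an illustrative example, not code taken from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the master to flush every region of the table; this surfaces in the log as a
            // FlushTableProcedure moving through FLUSH_TABLE_PREPARE / FLUSH_TABLE_FLUSH_REGIONS.
            admin.flush(table);

            // Request a compaction of all stores; which files get selected (e.g. the 3 files the
            // ExploringCompactionPolicy picks above) is decided server-side.
            admin.compact(table);

            // Coarse status only: reports whether a minor/major compaction is currently running.
            CompactionState state = admin.getCompactionState(table);
            System.out.println("compaction state for " + table + ": " + state);
        }
    }
}

The detailed selection and throttling (PressureAwareThroughputController, MOB writer handling) remain server-side concerns, as the surrounding entries show.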
2024-12-17T12:38:20,617 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f35cbaa06bd448e18e7d5dd03bd4f7a2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/7f47bedaab014415801096fea2c7457d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/faaa21e695b4415088259581a80445df] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=35.3 K 2024-12-17T12:38:20,618 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting f35cbaa06bd448e18e7d5dd03bd4f7a2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734439097179 2024-12-17T12:38:20,620 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f47bedaab014415801096fea2c7457d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734439097827 2024-12-17T12:38:20,620 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting faaa21e695b4415088259581a80445df, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734439099980 2024-12-17T12:38:20,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741972_1148 (size=4469) 2024-12-17T12:38:20,622 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#A#compaction#124 average throughput is 0.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:20,623 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/6f11a61beae8452b886e894095d512e7 is 175, key is test_row_0/A:col10/1734439099980/Put/seqid=0 2024-12-17T12:38:20,645 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#C#compaction#125 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:20,646 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/aade91c184f14207afe53bc0f3c5535f is 50, key is test_row_0/C:col10/1734439099980/Put/seqid=0 2024-12-17T12:38:20,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741973_1149 (size=31161) 2024-12-17T12:38:20,659 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/6f11a61beae8452b886e894095d512e7 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/6f11a61beae8452b886e894095d512e7 2024-12-17T12:38:20,667 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/A of 1e405373380390a8eca5f807f91814d6 into 6f11a61beae8452b886e894095d512e7(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:20,667 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:20,667 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/A, priority=13, startTime=1734439100556; duration=0sec 2024-12-17T12:38:20,667 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:20,667 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:A 2024-12-17T12:38:20,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741974_1150 (size=12207) 2024-12-17T12:38:20,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-17T12:38:20,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:20,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-17T12:38:20,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:20,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:20,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:20,690 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:20,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:20,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:20,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217dd5b7cf0cf8c43a983ea2f988ebe99f2_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439100688/Put/seqid=0 2024-12-17T12:38:20,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439160700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439160700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439160700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439160700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:20,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439160701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741975_1151 (size=12154) 2024-12-17T12:38:20,706 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,712 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217dd5b7cf0cf8c43a983ea2f988ebe99f2_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217dd5b7cf0cf8c43a983ea2f988ebe99f2_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:20,713 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/ffba13bb77e54cee8de48fb3dbeb5248, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:20,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/ffba13bb77e54cee8de48fb3dbeb5248 is 175, key is test_row_0/A:col10/1734439100688/Put/seqid=0 2024-12-17T12:38:20,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741976_1152 (size=30955) 2024-12-17T12:38:20,723 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=121, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/ffba13bb77e54cee8de48fb3dbeb5248 2024-12-17T12:38:20,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/7b311827b4364c9eb4e863a04fc2036c is 50, key is 
test_row_0/B:col10/1734439100688/Put/seqid=0 2024-12-17T12:38:20,741 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:20,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:20,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:20,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:20,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:20,742 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:20,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:20,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:20,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741977_1153 (size=12001) 2024-12-17T12:38:20,756 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/7b311827b4364c9eb4e863a04fc2036c 2024-12-17T12:38:20,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/498c0595ef874f75ba32a7e0566b6195 is 50, key is test_row_0/C:col10/1734439100688/Put/seqid=0 2024-12-17T12:38:20,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741978_1154 (size=12001) 2024-12-17T12:38:20,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/498c0595ef874f75ba32a7e0566b6195 2024-12-17T12:38:20,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/ffba13bb77e54cee8de48fb3dbeb5248 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ffba13bb77e54cee8de48fb3dbeb5248 2024-12-17T12:38:20,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ffba13bb77e54cee8de48fb3dbeb5248, entries=150, sequenceid=121, filesize=30.2 K 2024-12-17T12:38:20,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/7b311827b4364c9eb4e863a04fc2036c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/7b311827b4364c9eb4e863a04fc2036c 2024-12-17T12:38:20,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/7b311827b4364c9eb4e863a04fc2036c, entries=150, sequenceid=121, filesize=11.7 K 2024-12-17T12:38:20,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/498c0595ef874f75ba32a7e0566b6195 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/498c0595ef874f75ba32a7e0566b6195 2024-12-17T12:38:20,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,814 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/498c0595ef874f75ba32a7e0566b6195, entries=150, sequenceid=121, filesize=11.7 K 2024-12-17T12:38:20,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 1e405373380390a8eca5f807f91814d6 in 126ms, sequenceid=121, compaction requested=false 2024-12-17T12:38:20,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:20,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,817 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,820 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,822 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,824 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,826 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,829 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,831 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:20,834 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-17T12:38:20,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A
2024-12-17T12:38:20,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-17T12:38:20,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B
2024-12-17T12:38:20,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-17T12:38:20,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C
2024-12-17T12:38:20,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-17T12:38:20,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6
2024-12-17T12:38:20,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121704363d0ede254c3f882e09f27d722b52_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439100842/Put/seqid=0
2024-12-17T12:38:20,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741979_1155 (size=12254)
2024-12-17T12:38:20,865 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:38:20,871 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121704363d0ede254c3f882e09f27d722b52_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121704363d0ede254c3f882e09f27d722b52_1e405373380390a8eca5f807f91814d6
2024-12-17T12:38:20,873 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/1edb257536804cb7a461928cc6b86604, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6]
2024-12-17T12:38:20,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/1edb257536804cb7a461928cc6b86604 is 175, key is test_row_0/A:col10/1734439100842/Put/seqid=0
2024-12-17T12:38:20,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741980_1156 (size=31055)
2024-12-17T12:38:20,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:20,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439160884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:20,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:20,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43
2024-12-17T12:38:20,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439160885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:20,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:20,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439160886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:20,894 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:20,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44
2024-12-17T12:38:20,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.
2024-12-17T12:38:20,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing
2024-12-17T12:38:20,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.
2024-12-17T12:38:20,895 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44
java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:20,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44
java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:20,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:20,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:20,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439160990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:20,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:20,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439160990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:20,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:20,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439160991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:21,047 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:21,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44
2024-12-17T12:38:21,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.
2024-12-17T12:38:21,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing
2024-12-17T12:38:21,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.
2024-12-17T12:38:21,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44
java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:21,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44
java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:21,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:21,079 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/aade91c184f14207afe53bc0f3c5535f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/aade91c184f14207afe53bc0f3c5535f
2024-12-17T12:38:21,084 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/C of 1e405373380390a8eca5f807f91814d6 into aade91c184f14207afe53bc0f3c5535f(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-17T12:38:21,084 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6:
2024-12-17T12:38:21,084 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/C, priority=13, startTime=1734439100557; duration=0sec
2024-12-17T12:38:21,084 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-17T12:38:21,084 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:C
2024-12-17T12:38:21,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43
2024-12-17T12:38:21,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:21,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439161195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:21,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:21,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439161195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:21,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:21,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439161195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:21,199 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:21,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44
2024-12-17T12:38:21,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.
2024-12-17T12:38:21,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing
2024-12-17T12:38:21,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.
2024-12-17T12:38:21,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44
java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:21,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44
java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:21,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:21,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:21,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439161205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:21,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:38:21,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439161208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:21,282 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/1edb257536804cb7a461928cc6b86604
2024-12-17T12:38:21,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/a6e3078f2bf74bdba37f4d8fa4272d6d is 50, key is test_row_0/B:col10/1734439100842/Put/seqid=0
2024-12-17T12:38:21,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741981_1157 (size=12101)
2024-12-17T12:38:21,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/a6e3078f2bf74bdba37f4d8fa4272d6d
2024-12-17T12:38:21,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/510c93f247244d98842f0cb584b96d88 is 50, key is test_row_0/C:col10/1734439100842/Put/seqid=0
2024-12-17T12:38:21,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741982_1158 (size=12101)
2024-12-17T12:38:21,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/510c93f247244d98842f0cb584b96d88
2024-12-17T12:38:21,352 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372
2024-12-17T12:38:21,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44
2024-12-17T12:38:21,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.
2024-12-17T12:38:21,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing
2024-12-17T12:38:21,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.
2024-12-17T12:38:21,353 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44
java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:21,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44
java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:21,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:38:21,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/1edb257536804cb7a461928cc6b86604 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/1edb257536804cb7a461928cc6b86604
2024-12-17T12:38:21,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/1edb257536804cb7a461928cc6b86604, entries=150, sequenceid=132, filesize=30.3 K
2024-12-17T12:38:21,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/a6e3078f2bf74bdba37f4d8fa4272d6d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/a6e3078f2bf74bdba37f4d8fa4272d6d
2024-12-17T12:38:21,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/a6e3078f2bf74bdba37f4d8fa4272d6d, entries=150, sequenceid=132, filesize=11.8 K
2024-12-17T12:38:21,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/510c93f247244d98842f0cb584b96d88 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/510c93f247244d98842f0cb584b96d88
2024-12-17T12:38:21,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/510c93f247244d98842f0cb584b96d88, entries=150, sequenceid=132, filesize=11.8 K
2024-12-17T12:38:21,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 1e405373380390a8eca5f807f91814d6 in 558ms, sequenceid=132, compaction requested=true
2024-12-17T12:38:21,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6:
2024-12-17T12:38:21,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:A, priority=-2147483648, current under compaction store size is 1
2024-12-17T12:38:21,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-17T12:38:21,402 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-17T12:38:21,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:B, priority=-2147483648, current under compaction store size is 2
2024-12-17T12:38:21,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-17T12:38:21,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:C, priority=-2147483648, current under compaction store size is 3
2024-12-17T12:38:21,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0
2024-12-17T12:38:21,402 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-17T12:38:21,404 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-17T12:38:21,404 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/B is initiating minor compaction (all files)
2024-12-17T12:38:21,404 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/B in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.
2024-12-17T12:38:21,404 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/34c848b094e842388982fe8e38a61c0b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/7b311827b4364c9eb4e863a04fc2036c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/a6e3078f2bf74bdba37f4d8fa4272d6d] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=35.5 K
2024-12-17T12:38:21,405 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93171 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-17T12:38:21,405 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/A is initiating minor compaction (all files)
2024-12-17T12:38:21,405 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/A in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.
2024-12-17T12:38:21,405 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/6f11a61beae8452b886e894095d512e7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ffba13bb77e54cee8de48fb3dbeb5248, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/1edb257536804cb7a461928cc6b86604] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=91.0 K 2024-12-17T12:38:21,405 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:21,405 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/6f11a61beae8452b886e894095d512e7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ffba13bb77e54cee8de48fb3dbeb5248, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/1edb257536804cb7a461928cc6b86604] 2024-12-17T12:38:21,405 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 34c848b094e842388982fe8e38a61c0b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734439099980 2024-12-17T12:38:21,407 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f11a61beae8452b886e894095d512e7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734439099980 2024-12-17T12:38:21,408 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b311827b4364c9eb4e863a04fc2036c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1734439100063 2024-12-17T12:38:21,408 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffba13bb77e54cee8de48fb3dbeb5248, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1734439100063 2024-12-17T12:38:21,408 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting a6e3078f2bf74bdba37f4d8fa4272d6d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734439100697 2024-12-17T12:38:21,408 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1edb257536804cb7a461928cc6b86604, keycount=150, bloomtype=ROW, size=30.3 K, 
encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734439100697 2024-12-17T12:38:21,419 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#B#compaction#132 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:21,421 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:21,421 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/a38db1cef45f461ba0384cc94378624b is 50, key is test_row_0/B:col10/1734439100842/Put/seqid=0 2024-12-17T12:38:21,424 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241217642dc02593e7449fb275aa906bf72182_1e405373380390a8eca5f807f91814d6 store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:21,425 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241217642dc02593e7449fb275aa906bf72182_1e405373380390a8eca5f807f91814d6, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:21,426 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217642dc02593e7449fb275aa906bf72182_1e405373380390a8eca5f807f91814d6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:21,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741983_1159 (size=12409) 2024-12-17T12:38:21,437 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/a38db1cef45f461ba0384cc94378624b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/a38db1cef45f461ba0384cc94378624b 2024-12-17T12:38:21,445 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/B of 1e405373380390a8eca5f807f91814d6 into a38db1cef45f461ba0384cc94378624b(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:21,445 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:21,445 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/B, priority=13, startTime=1734439101402; duration=0sec 2024-12-17T12:38:21,445 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:21,446 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:B 2024-12-17T12:38:21,446 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:21,448 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:21,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741984_1160 (size=4469) 2024-12-17T12:38:21,448 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/C is initiating minor compaction (all files) 2024-12-17T12:38:21,448 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/C in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:21,448 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/aade91c184f14207afe53bc0f3c5535f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/498c0595ef874f75ba32a7e0566b6195, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/510c93f247244d98842f0cb584b96d88] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=35.5 K 2024-12-17T12:38:21,448 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting aade91c184f14207afe53bc0f3c5535f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734439099980 2024-12-17T12:38:21,449 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 498c0595ef874f75ba32a7e0566b6195, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1734439100063 2024-12-17T12:38:21,449 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 510c93f247244d98842f0cb584b96d88, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734439100697 2024-12-17T12:38:21,450 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#A#compaction#133 average throughput is 0.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:21,450 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/58899fd529514c828d2ce92b18473641 is 175, key is test_row_0/A:col10/1734439100842/Put/seqid=0 2024-12-17T12:38:21,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741985_1161 (size=31363) 2024-12-17T12:38:21,462 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/58899fd529514c828d2ce92b18473641 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/58899fd529514c828d2ce92b18473641 2024-12-17T12:38:21,469 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#C#compaction#134 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:21,470 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/8cdc7729cba94ed081310f255c931d00 is 50, key is test_row_0/C:col10/1734439100842/Put/seqid=0 2024-12-17T12:38:21,472 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/A of 1e405373380390a8eca5f807f91814d6 into 58899fd529514c828d2ce92b18473641(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:21,472 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:21,472 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/A, priority=13, startTime=1734439101402; duration=0sec 2024-12-17T12:38:21,472 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:21,472 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:A 2024-12-17T12:38:21,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741986_1162 (size=12409) 2024-12-17T12:38:21,487 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/8cdc7729cba94ed081310f255c931d00 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/8cdc7729cba94ed081310f255c931d00 2024-12-17T12:38:21,492 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/C of 1e405373380390a8eca5f807f91814d6 into 8cdc7729cba94ed081310f255c931d00(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:21,492 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:21,492 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/C, priority=13, startTime=1734439101402; duration=0sec 2024-12-17T12:38:21,492 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:21,492 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:C 2024-12-17T12:38:21,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:21,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-17T12:38:21,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:21,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:21,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:21,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:21,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:21,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:21,506 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217606f6e5a2db943b9943bef8cb1f01ea2_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439100878/Put/seqid=0 2024-12-17T12:38:21,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439161506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:21,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:21,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:21,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:21,509 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:21,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:21,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439161508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439161508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741987_1163 (size=14794) 2024-12-17T12:38:21,524 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:21,530 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217606f6e5a2db943b9943bef8cb1f01ea2_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217606f6e5a2db943b9943bef8cb1f01ea2_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:21,532 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/fccd2ef7e1c34ba38248d4fa85d293fe, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:21,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/fccd2ef7e1c34ba38248d4fa85d293fe is 175, key is test_row_0/A:col10/1734439100878/Put/seqid=0 2024-12-17T12:38:21,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741988_1164 (size=39749) 2024-12-17T12:38:21,545 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=163, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/fccd2ef7e1c34ba38248d4fa85d293fe 2024-12-17T12:38:21,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/ed721ab2b3044baebded65c704b0929a is 50, key is test_row_0/B:col10/1734439100878/Put/seqid=0 2024-12-17T12:38:21,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741989_1165 (size=12151) 2024-12-17T12:38:21,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=163 (bloomFilter=true), 
to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/ed721ab2b3044baebded65c704b0929a 2024-12-17T12:38:21,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/f9a240935ccb477489c29f8cd96cd806 is 50, key is test_row_0/C:col10/1734439100878/Put/seqid=0 2024-12-17T12:38:21,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741990_1166 (size=12151) 2024-12-17T12:38:21,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/f9a240935ccb477489c29f8cd96cd806 2024-12-17T12:38:21,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/fccd2ef7e1c34ba38248d4fa85d293fe as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/fccd2ef7e1c34ba38248d4fa85d293fe 2024-12-17T12:38:21,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/fccd2ef7e1c34ba38248d4fa85d293fe, entries=200, sequenceid=163, filesize=38.8 K 2024-12-17T12:38:21,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/ed721ab2b3044baebded65c704b0929a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/ed721ab2b3044baebded65c704b0929a 2024-12-17T12:38:21,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/ed721ab2b3044baebded65c704b0929a, entries=150, sequenceid=163, filesize=11.9 K 2024-12-17T12:38:21,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/f9a240935ccb477489c29f8cd96cd806 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f9a240935ccb477489c29f8cd96cd806 2024-12-17T12:38:21,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f9a240935ccb477489c29f8cd96cd806, entries=150, sequenceid=163, filesize=11.9 K 2024-12-17T12:38:21,609 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 1e405373380390a8eca5f807f91814d6 in 110ms, sequenceid=163, compaction requested=false 2024-12-17T12:38:21,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:21,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:21,614 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-17T12:38:21,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:21,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:21,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:21,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:21,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:21,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:21,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412172dbef429c05c4fc59a315acc820a6764_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439101611/Put/seqid=0 2024-12-17T12:38:21,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439161643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741991_1167 (size=14794) 2024-12-17T12:38:21,648 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:21,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439161646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439161647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,653 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412172dbef429c05c4fc59a315acc820a6764_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412172dbef429c05c4fc59a315acc820a6764_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:21,654 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/ca6fffae29d3471abb8a558be9fc8b5f, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:21,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/ca6fffae29d3471abb8a558be9fc8b5f is 175, key is test_row_0/A:col10/1734439101611/Put/seqid=0 2024-12-17T12:38:21,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741992_1168 (size=39749) 2024-12-17T12:38:21,660 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/ca6fffae29d3471abb8a558be9fc8b5f 
2024-12-17T12:38:21,661 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:21,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:21,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:21,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:21,662 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:21,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:21,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:21,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/64fc166784c644b783411886b1d0159d is 50, key is test_row_0/B:col10/1734439101611/Put/seqid=0 2024-12-17T12:38:21,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-17T12:38:21,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741993_1169 (size=12151) 2024-12-17T12:38:21,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439161748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439161749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439161749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,823 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:21,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:21,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:21,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:21,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:21,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:21,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:21,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439161952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439161953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:21,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439161954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,976 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:21,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:21,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:21,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:21,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:21,977 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:21,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:21,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/64fc166784c644b783411886b1d0159d 2024-12-17T12:38:22,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/d89964cc17534f1fa1c35a9bc3fb0bd5 is 50, key is test_row_0/C:col10/1734439101611/Put/seqid=0 2024-12-17T12:38:22,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741994_1170 (size=12151) 2024-12-17T12:38:22,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/d89964cc17534f1fa1c35a9bc3fb0bd5 2024-12-17T12:38:22,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/ca6fffae29d3471abb8a558be9fc8b5f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ca6fffae29d3471abb8a558be9fc8b5f 2024-12-17T12:38:22,129 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,129 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:22,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:22,130 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ca6fffae29d3471abb8a558be9fc8b5f, entries=200, sequenceid=176, filesize=38.8 K 2024-12-17T12:38:22,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:22,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:22,130 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/64fc166784c644b783411886b1d0159d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/64fc166784c644b783411886b1d0159d 2024-12-17T12:38:22,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/64fc166784c644b783411886b1d0159d, entries=150, sequenceid=176, filesize=11.9 K 2024-12-17T12:38:22,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/d89964cc17534f1fa1c35a9bc3fb0bd5 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/d89964cc17534f1fa1c35a9bc3fb0bd5 2024-12-17T12:38:22,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/d89964cc17534f1fa1c35a9bc3fb0bd5, entries=150, sequenceid=176, filesize=11.9 K 2024-12-17T12:38:22,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 1e405373380390a8eca5f807f91814d6 in 537ms, sequenceid=176, compaction requested=true 2024-12-17T12:38:22,150 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:22,150 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:22,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:22,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:22,151 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:22,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:22,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:22,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:22,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:22,153 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110861 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:22,153 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/A is initiating minor compaction (all files) 2024-12-17T12:38:22,154 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/A in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:22,154 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/58899fd529514c828d2ce92b18473641, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/fccd2ef7e1c34ba38248d4fa85d293fe, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ca6fffae29d3471abb8a558be9fc8b5f] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=108.3 K 2024-12-17T12:38:22,154 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:22,154 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/58899fd529514c828d2ce92b18473641, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/fccd2ef7e1c34ba38248d4fa85d293fe, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ca6fffae29d3471abb8a558be9fc8b5f] 2024-12-17T12:38:22,154 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:22,155 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/B is initiating minor compaction (all files) 2024-12-17T12:38:22,155 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/B in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:22,155 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/a38db1cef45f461ba0384cc94378624b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/ed721ab2b3044baebded65c704b0929a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/64fc166784c644b783411886b1d0159d] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=35.9 K 2024-12-17T12:38:22,155 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58899fd529514c828d2ce92b18473641, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734439100697 2024-12-17T12:38:22,155 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting a38db1cef45f461ba0384cc94378624b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734439100697 2024-12-17T12:38:22,156 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting fccd2ef7e1c34ba38248d4fa85d293fe, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1734439100878 2024-12-17T12:38:22,156 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting ed721ab2b3044baebded65c704b0929a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1734439100878 2024-12-17T12:38:22,156 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca6fffae29d3471abb8a558be9fc8b5f, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734439101506 2024-12-17T12:38:22,157 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 64fc166784c644b783411886b1d0159d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734439101506 2024-12-17T12:38:22,165 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:22,177 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#B#compaction#142 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:22,178 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/ce3ba2848f5d44118b569c14662de307 is 50, key is test_row_0/B:col10/1734439101611/Put/seqid=0 2024-12-17T12:38:22,188 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412172a6cbc3cf11c443d9c0972a7c3b66f58_1e405373380390a8eca5f807f91814d6 store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:22,190 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412172a6cbc3cf11c443d9c0972a7c3b66f58_1e405373380390a8eca5f807f91814d6, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:22,190 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412172a6cbc3cf11c443d9c0972a7c3b66f58_1e405373380390a8eca5f807f91814d6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:22,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741995_1171 (size=12561) 2024-12-17T12:38:22,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:22,212 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-17T12:38:22,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:22,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:22,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:22,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:22,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:22,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:22,218 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/ce3ba2848f5d44118b569c14662de307 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/ce3ba2848f5d44118b569c14662de307 2024-12-17T12:38:22,222 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741996_1172 (size=4469) 2024-12-17T12:38:22,226 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#A#compaction#141 average throughput is 0.40 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:22,226 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/f3fa67265e4d45eca56e78ad73a155cd is 175, key is test_row_0/A:col10/1734439101611/Put/seqid=0 2024-12-17T12:38:22,234 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/B of 1e405373380390a8eca5f807f91814d6 into ce3ba2848f5d44118b569c14662de307(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:22,234 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:22,234 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/B, priority=13, startTime=1734439102151; duration=0sec 2024-12-17T12:38:22,234 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:22,234 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:B 2024-12-17T12:38:22,234 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:22,237 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:22,237 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/C is initiating minor compaction (all files) 2024-12-17T12:38:22,238 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/C in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:22,238 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/8cdc7729cba94ed081310f255c931d00, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f9a240935ccb477489c29f8cd96cd806, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/d89964cc17534f1fa1c35a9bc3fb0bd5] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=35.9 K 2024-12-17T12:38:22,239 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cdc7729cba94ed081310f255c931d00, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734439100697 2024-12-17T12:38:22,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439162235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439162235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,240 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting f9a240935ccb477489c29f8cd96cd806, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1734439100878 2024-12-17T12:38:22,240 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d89964cc17534f1fa1c35a9bc3fb0bd5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734439101506 2024-12-17T12:38:22,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217c6e631f88d504a56a1362430249838ef_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439101641/Put/seqid=0 2024-12-17T12:38:22,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439162254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439162254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439162258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741997_1173 (size=31515) 2024-12-17T12:38:22,282 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#C#compaction#144 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:22,282 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,283 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/eb8ab45c9c6e4bf9b0c1e860bb742837 is 50, key is test_row_0/C:col10/1734439101611/Put/seqid=0 2024-12-17T12:38:22,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:22,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:22,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:22,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:22,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,289 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/f3fa67265e4d45eca56e78ad73a155cd as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f3fa67265e4d45eca56e78ad73a155cd 2024-12-17T12:38:22,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741998_1174 (size=12304) 2024-12-17T12:38:22,298 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/A of 1e405373380390a8eca5f807f91814d6 into f3fa67265e4d45eca56e78ad73a155cd(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:22,298 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:22,298 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/A, priority=13, startTime=1734439102150; duration=0sec 2024-12-17T12:38:22,298 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:22,298 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:A 2024-12-17T12:38:22,298 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:22,305 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217c6e631f88d504a56a1362430249838ef_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217c6e631f88d504a56a1362430249838ef_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:22,306 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/5fadb68a4f4246eda36914464d4bea56, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:22,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/5fadb68a4f4246eda36914464d4bea56 is 175, key is test_row_0/A:col10/1734439101641/Put/seqid=0 2024-12-17T12:38:22,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741999_1175 (size=12561) 2024-12-17T12:38:22,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742000_1176 (size=31105) 2024-12-17T12:38:22,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439162340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439162340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,436 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:22,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:22,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:22,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:22,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:22,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439162544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439162545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,588 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:22,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:22,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:22,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:22,589 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:22,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-17T12:38:22,718 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/eb8ab45c9c6e4bf9b0c1e860bb742837 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/eb8ab45c9c6e4bf9b0c1e860bb742837 2024-12-17T12:38:22,720 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=200, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/5fadb68a4f4246eda36914464d4bea56 2024-12-17T12:38:22,729 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/C of 1e405373380390a8eca5f807f91814d6 into eb8ab45c9c6e4bf9b0c1e860bb742837(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:22,729 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:22,729 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/C, priority=13, startTime=1734439102151; duration=0sec 2024-12-17T12:38:22,729 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:22,729 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:C 2024-12-17T12:38:22,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/e56a1377797f418da043a76cc5a4f9da is 50, key is test_row_0/B:col10/1734439101641/Put/seqid=0 2024-12-17T12:38:22,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742001_1177 (size=12151) 2024-12-17T12:38:22,741 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/e56a1377797f418da043a76cc5a4f9da 
2024-12-17T12:38:22,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:22,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:22,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:22,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:22,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:22,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/c1c6bd0df0b94f97b1b16d7f4af15f19 is 50, key is test_row_0/C:col10/1734439101641/Put/seqid=0 2024-12-17T12:38:22,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439162759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439162760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439162763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742002_1178 (size=12151) 2024-12-17T12:38:22,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/c1c6bd0df0b94f97b1b16d7f4af15f19 2024-12-17T12:38:22,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/5fadb68a4f4246eda36914464d4bea56 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/5fadb68a4f4246eda36914464d4bea56 2024-12-17T12:38:22,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/5fadb68a4f4246eda36914464d4bea56, entries=150, sequenceid=200, filesize=30.4 K 2024-12-17T12:38:22,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/e56a1377797f418da043a76cc5a4f9da as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e56a1377797f418da043a76cc5a4f9da 2024-12-17T12:38:22,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e56a1377797f418da043a76cc5a4f9da, entries=150, sequenceid=200, 
filesize=11.9 K 2024-12-17T12:38:22,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/c1c6bd0df0b94f97b1b16d7f4af15f19 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/c1c6bd0df0b94f97b1b16d7f4af15f19 2024-12-17T12:38:22,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/c1c6bd0df0b94f97b1b16d7f4af15f19, entries=150, sequenceid=200, filesize=11.9 K 2024-12-17T12:38:22,795 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 1e405373380390a8eca5f807f91814d6 in 583ms, sequenceid=200, compaction requested=false 2024-12-17T12:38:22,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:22,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:22,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-17T12:38:22,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:22,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:22,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:22,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:22,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:22,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:22,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217b5f75e12aff84c1398e6b50f58f92555_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439102233/Put/seqid=0 2024-12-17T12:38:22,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742003_1179 (size=14794) 2024-12-17T12:38:22,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439162869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439162870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,896 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:22,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:22,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:22,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:22,896 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:22,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:22,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439162972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:22,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:22,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439162972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,048 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:23,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:23,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:23,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:23,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439163174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:23,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439163175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,201 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:23,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:23,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,202 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:23,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,259 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:23,263 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217b5f75e12aff84c1398e6b50f58f92555_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217b5f75e12aff84c1398e6b50f58f92555_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:23,264 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/3101c90c86dc4031a9bc6b44ec702d5e, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:23,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/3101c90c86dc4031a9bc6b44ec702d5e is 175, key is test_row_0/A:col10/1734439102233/Put/seqid=0 2024-12-17T12:38:23,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742004_1180 (size=39749) 2024-12-17T12:38:23,354 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:23,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:23,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:23,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439163477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:23,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439163478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,506 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:23,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:23,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:23,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,507 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:23,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,659 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:23,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
as already flushing 2024-12-17T12:38:23,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,660 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,669 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=217, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/3101c90c86dc4031a9bc6b44ec702d5e 2024-12-17T12:38:23,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/9d88a4278fdb475ea5f4baddfc2498fd is 50, key is test_row_0/B:col10/1734439102233/Put/seqid=0 2024-12-17T12:38:23,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742005_1181 (size=12151) 2024-12-17T12:38:23,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:23,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439163762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:23,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439163768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:23,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439163771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,811 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:23,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:23,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,812 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:23,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,931 INFO [master/681c08bfdbdf:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-17T12:38:23,931 INFO [master/681c08bfdbdf:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-17T12:38:23,964 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:23,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:23,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:23,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:23,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:23,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439163981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:23,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:23,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439163982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:24,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/9d88a4278fdb475ea5f4baddfc2498fd 2024-12-17T12:38:24,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/eccf3f8b305a4368bdcad4215c1bee1e is 50, key is test_row_0/C:col10/1734439102233/Put/seqid=0 2024-12-17T12:38:24,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742006_1182 (size=12151) 2024-12-17T12:38:24,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/eccf3f8b305a4368bdcad4215c1bee1e 2024-12-17T12:38:24,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/3101c90c86dc4031a9bc6b44ec702d5e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/3101c90c86dc4031a9bc6b44ec702d5e 2024-12-17T12:38:24,117 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:24,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:24,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:24,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
as already flushing 2024-12-17T12:38:24,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:24,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:24,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:24,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:24,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/3101c90c86dc4031a9bc6b44ec702d5e, entries=200, sequenceid=217, filesize=38.8 K 2024-12-17T12:38:24,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/9d88a4278fdb475ea5f4baddfc2498fd as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d88a4278fdb475ea5f4baddfc2498fd 2024-12-17T12:38:24,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d88a4278fdb475ea5f4baddfc2498fd, entries=150, sequenceid=217, filesize=11.9 K 2024-12-17T12:38:24,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/eccf3f8b305a4368bdcad4215c1bee1e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/eccf3f8b305a4368bdcad4215c1bee1e 2024-12-17T12:38:24,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/eccf3f8b305a4368bdcad4215c1bee1e, entries=150, sequenceid=217, filesize=11.9 K 2024-12-17T12:38:24,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 1e405373380390a8eca5f807f91814d6 in 1286ms, sequenceid=217, compaction requested=true 2024-12-17T12:38:24,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:24,133 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:24,134 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:24,135 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:24,135 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/A is initiating minor compaction (all files) 2024-12-17T12:38:24,135 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/A in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:24,135 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f3fa67265e4d45eca56e78ad73a155cd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/5fadb68a4f4246eda36914464d4bea56, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/3101c90c86dc4031a9bc6b44ec702d5e] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=100.0 K 2024-12-17T12:38:24,135 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:24,135 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f3fa67265e4d45eca56e78ad73a155cd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/5fadb68a4f4246eda36914464d4bea56, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/3101c90c86dc4031a9bc6b44ec702d5e] 2024-12-17T12:38:24,136 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:24,136 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/B is initiating minor compaction (all files) 2024-12-17T12:38:24,136 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/B in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:24,136 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/ce3ba2848f5d44118b569c14662de307, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e56a1377797f418da043a76cc5a4f9da, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d88a4278fdb475ea5f4baddfc2498fd] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=36.0 K 2024-12-17T12:38:24,137 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting ce3ba2848f5d44118b569c14662de307, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734439101506 2024-12-17T12:38:24,137 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3fa67265e4d45eca56e78ad73a155cd, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734439101506 2024-12-17T12:38:24,137 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting e56a1377797f418da043a76cc5a4f9da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734439101641 2024-12-17T12:38:24,137 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fadb68a4f4246eda36914464d4bea56, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734439101641 2024-12-17T12:38:24,138 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d88a4278fdb475ea5f4baddfc2498fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1734439102233 2024-12-17T12:38:24,139 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3101c90c86dc4031a9bc6b44ec702d5e, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1734439102229 2024-12-17T12:38:24,147 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:24,164 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#B#compaction#151 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:24,165 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/cd69e0b600eb4c6bb4ef1cc76a0a7584 is 50, key is test_row_0/B:col10/1734439102233/Put/seqid=0 2024-12-17T12:38:24,168 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412177422f4c87e4841c3ad495eb286687938_1e405373380390a8eca5f807f91814d6 store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:24,169 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412177422f4c87e4841c3ad495eb286687938_1e405373380390a8eca5f807f91814d6, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:24,170 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412177422f4c87e4841c3ad495eb286687938_1e405373380390a8eca5f807f91814d6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:24,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742007_1183 (size=12663) 2024-12-17T12:38:24,183 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/cd69e0b600eb4c6bb4ef1cc76a0a7584 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/cd69e0b600eb4c6bb4ef1cc76a0a7584 2024-12-17T12:38:24,188 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/B of 1e405373380390a8eca5f807f91814d6 into cd69e0b600eb4c6bb4ef1cc76a0a7584(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:24,188 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:24,188 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/B, priority=13, startTime=1734439104134; duration=0sec 2024-12-17T12:38:24,189 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:24,189 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:B 2024-12-17T12:38:24,189 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:24,191 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:24,191 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/C is initiating minor compaction (all files) 2024-12-17T12:38:24,191 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/C in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:24,192 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/eb8ab45c9c6e4bf9b0c1e860bb742837, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/c1c6bd0df0b94f97b1b16d7f4af15f19, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/eccf3f8b305a4368bdcad4215c1bee1e] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=36.0 K 2024-12-17T12:38:24,192 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting eb8ab45c9c6e4bf9b0c1e860bb742837, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734439101506 2024-12-17T12:38:24,193 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting c1c6bd0df0b94f97b1b16d7f4af15f19, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734439101641 2024-12-17T12:38:24,193 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting eccf3f8b305a4368bdcad4215c1bee1e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1734439102233 2024-12-17T12:38:24,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 
is added to blk_1073742008_1184 (size=4469) 2024-12-17T12:38:24,201 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#C#compaction#152 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:24,202 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/07cfe1eaa6634426b218d472d13168be is 50, key is test_row_0/C:col10/1734439102233/Put/seqid=0 2024-12-17T12:38:24,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742009_1185 (size=12663) 2024-12-17T12:38:24,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:24,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-17T12:38:24,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:24,271 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-17T12:38:24,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:24,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:24,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:24,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:24,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:24,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:24,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121774b9db08c69740c59cff5b5ad1ed138a_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439102867/Put/seqid=0 2024-12-17T12:38:24,281 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742010_1186 (size=12304) 2024-12-17T12:38:24,597 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#A#compaction#150 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:24,598 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/9c0bb2d768064a3f974276799654e408 is 175, key is test_row_0/A:col10/1734439102233/Put/seqid=0 2024-12-17T12:38:24,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742011_1187 (size=31617) 2024-12-17T12:38:24,611 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/07cfe1eaa6634426b218d472d13168be as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/07cfe1eaa6634426b218d472d13168be 2024-12-17T12:38:24,615 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/C of 1e405373380390a8eca5f807f91814d6 into 07cfe1eaa6634426b218d472d13168be(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:24,615 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:24,615 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/C, priority=13, startTime=1734439104134; duration=0sec 2024-12-17T12:38:24,615 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:24,615 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:C 2024-12-17T12:38:24,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:24,686 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121774b9db08c69740c59cff5b5ad1ed138a_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121774b9db08c69740c59cff5b5ad1ed138a_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:24,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/bd084fcb284e4e11af809e2d78467437, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:24,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/bd084fcb284e4e11af809e2d78467437 is 175, key is test_row_0/A:col10/1734439102867/Put/seqid=0 2024-12-17T12:38:24,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742012_1188 (size=31105) 2024-12-17T12:38:24,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-17T12:38:24,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:24,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
as already flushing 2024-12-17T12:38:25,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:25,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439164999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:25,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:25,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439164999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:25,006 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/9c0bb2d768064a3f974276799654e408 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9c0bb2d768064a3f974276799654e408 2024-12-17T12:38:25,011 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/A of 1e405373380390a8eca5f807f91814d6 into 9c0bb2d768064a3f974276799654e408(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:25,011 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:25,011 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/A, priority=13, startTime=1734439104133; duration=0sec 2024-12-17T12:38:25,011 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:25,011 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:A 2024-12-17T12:38:25,091 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=240, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/bd084fcb284e4e11af809e2d78467437 2024-12-17T12:38:25,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/fa3f3d4960c74860b5a1747a44d74edb is 50, key is test_row_0/B:col10/1734439102867/Put/seqid=0 2024-12-17T12:38:25,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742013_1189 (size=12151) 2024-12-17T12:38:25,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:25,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439165101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:25,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:25,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439165102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:25,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:25,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439165305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:25,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:25,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439165306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:25,503 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/fa3f3d4960c74860b5a1747a44d74edb 2024-12-17T12:38:25,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/8d5e5eda76b749e49a8484c7ccfd661b is 50, key is test_row_0/C:col10/1734439102867/Put/seqid=0 2024-12-17T12:38:25,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742014_1190 (size=12151) 2024-12-17T12:38:25,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439165607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:25,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439165608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:25,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:25,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439165771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:25,772 DEBUG [Thread-612 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4127 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., hostname=681c08bfdbdf,36491,1734439058372, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:38:25,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:25,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439165774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:25,775 DEBUG [Thread-620 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4133 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., hostname=681c08bfdbdf,36491,1734439058372, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:38:25,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:25,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439165782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:25,783 DEBUG [Thread-614 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., hostname=681c08bfdbdf,36491,1734439058372, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at 
org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:38:25,914 INFO 
[RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/8d5e5eda76b749e49a8484c7ccfd661b 2024-12-17T12:38:25,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/bd084fcb284e4e11af809e2d78467437 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/bd084fcb284e4e11af809e2d78467437 2024-12-17T12:38:25,922 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/bd084fcb284e4e11af809e2d78467437, entries=150, sequenceid=240, filesize=30.4 K 2024-12-17T12:38:25,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/fa3f3d4960c74860b5a1747a44d74edb as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/fa3f3d4960c74860b5a1747a44d74edb 2024-12-17T12:38:25,927 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/fa3f3d4960c74860b5a1747a44d74edb, entries=150, sequenceid=240, filesize=11.9 K 2024-12-17T12:38:25,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/8d5e5eda76b749e49a8484c7ccfd661b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/8d5e5eda76b749e49a8484c7ccfd661b 2024-12-17T12:38:25,932 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/8d5e5eda76b749e49a8484c7ccfd661b, entries=150, sequenceid=240, filesize=11.9 K 2024-12-17T12:38:25,933 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 1e405373380390a8eca5f807f91814d6 in 1662ms, sequenceid=240, compaction requested=false 
2024-12-17T12:38:25,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:25,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:25,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-17T12:38:25,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-17T12:38:25,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-17T12:38:25,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 5.3460 sec 2024-12-17T12:38:25,937 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 5.3510 sec 2024-12-17T12:38:26,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:26,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-17T12:38:26,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:26,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:26,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:26,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:26,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:26,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:26,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217c13fd1eeafd147f29427520a4ec96e5d_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439104998/Put/seqid=0 2024-12-17T12:38:26,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742015_1191 (size=12354) 2024-12-17T12:38:26,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:26,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439166133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:26,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:26,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439166135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:26,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:26,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439166236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:26,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:26,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439166237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:26,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:26,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439166439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:26,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:26,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439166439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:26,525 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:26,553 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217c13fd1eeafd147f29427520a4ec96e5d_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217c13fd1eeafd147f29427520a4ec96e5d_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:26,554 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/f7c80a2e770b4cc5af945bff68ed632c, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:26,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/f7c80a2e770b4cc5af945bff68ed632c is 175, key is test_row_0/A:col10/1734439104998/Put/seqid=0 2024-12-17T12:38:26,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742016_1192 (size=31155) 2024-12-17T12:38:26,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:26,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439166741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:26,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:26,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439166742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:26,960 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=257, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/f7c80a2e770b4cc5af945bff68ed632c 2024-12-17T12:38:26,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/2ba0812fff8c4d2aac25ffba353837e6 is 50, key is test_row_0/B:col10/1734439104998/Put/seqid=0 2024-12-17T12:38:26,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742017_1193 (size=12201) 2024-12-17T12:38:27,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:27,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439167244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:27,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:27,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439167245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:27,369 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/2ba0812fff8c4d2aac25ffba353837e6 2024-12-17T12:38:27,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/78e28aeb891b43e4aca72cd10a0f245f is 50, key is test_row_0/C:col10/1734439104998/Put/seqid=0 2024-12-17T12:38:27,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742018_1194 (size=12201) 2024-12-17T12:38:27,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/78e28aeb891b43e4aca72cd10a0f245f 2024-12-17T12:38:27,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/f7c80a2e770b4cc5af945bff68ed632c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f7c80a2e770b4cc5af945bff68ed632c 2024-12-17T12:38:27,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f7c80a2e770b4cc5af945bff68ed632c, entries=150, sequenceid=257, filesize=30.4 K 2024-12-17T12:38:27,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/2ba0812fff8c4d2aac25ffba353837e6 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/2ba0812fff8c4d2aac25ffba353837e6 2024-12-17T12:38:27,801 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/2ba0812fff8c4d2aac25ffba353837e6, entries=150, sequenceid=257, filesize=11.9 K 2024-12-17T12:38:27,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/78e28aeb891b43e4aca72cd10a0f245f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/78e28aeb891b43e4aca72cd10a0f245f 2024-12-17T12:38:27,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/78e28aeb891b43e4aca72cd10a0f245f, entries=150, sequenceid=257, filesize=11.9 K 2024-12-17T12:38:27,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 1e405373380390a8eca5f807f91814d6 in 1693ms, sequenceid=257, compaction requested=true 2024-12-17T12:38:27,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:27,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:27,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:27,806 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:27,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:27,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:27,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:27,806 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:27,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:27,807 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93877 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:27,807 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:27,808 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/B is initiating minor compaction (all files) 2024-12-17T12:38:27,808 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/A is initiating minor compaction (all files) 2024-12-17T12:38:27,808 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/A in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:27,808 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/B in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:27,808 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9c0bb2d768064a3f974276799654e408, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/bd084fcb284e4e11af809e2d78467437, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f7c80a2e770b4cc5af945bff68ed632c] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=91.7 K 2024-12-17T12:38:27,808 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/cd69e0b600eb4c6bb4ef1cc76a0a7584, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/fa3f3d4960c74860b5a1747a44d74edb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/2ba0812fff8c4d2aac25ffba353837e6] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=36.1 K 2024-12-17T12:38:27,808 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:27,808 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9c0bb2d768064a3f974276799654e408, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/bd084fcb284e4e11af809e2d78467437, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f7c80a2e770b4cc5af945bff68ed632c] 2024-12-17T12:38:27,808 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting cd69e0b600eb4c6bb4ef1cc76a0a7584, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1734439102233 2024-12-17T12:38:27,808 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c0bb2d768064a3f974276799654e408, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1734439102233 2024-12-17T12:38:27,809 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd084fcb284e4e11af809e2d78467437, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1734439102867 2024-12-17T12:38:27,809 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting fa3f3d4960c74860b5a1747a44d74edb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1734439102867 2024-12-17T12:38:27,809 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7c80a2e770b4cc5af945bff68ed632c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1734439104987 2024-12-17T12:38:27,809 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ba0812fff8c4d2aac25ffba353837e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1734439104987 2024-12-17T12:38:27,815 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#B#compaction#159 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:27,815 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/b7b31530161a4cd9833f9365d0e22b76 is 50, key is test_row_0/B:col10/1734439104998/Put/seqid=0 2024-12-17T12:38:27,816 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:27,820 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241217ffb8cbf9964c4a3e89acd33d71b23712_1e405373380390a8eca5f807f91814d6 store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:27,821 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241217ffb8cbf9964c4a3e89acd33d71b23712_1e405373380390a8eca5f807f91814d6, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:27,821 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217ffb8cbf9964c4a3e89acd33d71b23712_1e405373380390a8eca5f807f91814d6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:27,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742019_1195 (size=12815) 2024-12-17T12:38:27,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742020_1196 (size=4469) 2024-12-17T12:38:28,230 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/b7b31530161a4cd9833f9365d0e22b76 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b7b31530161a4cd9833f9365d0e22b76 2024-12-17T12:38:28,231 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#A#compaction#160 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:28,232 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/4a2d6495838542b9ae7e091fe9d7b22b is 175, key is test_row_0/A:col10/1734439104998/Put/seqid=0 2024-12-17T12:38:28,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742021_1197 (size=31769) 2024-12-17T12:38:28,236 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/B of 1e405373380390a8eca5f807f91814d6 into b7b31530161a4cd9833f9365d0e22b76(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:28,236 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:28,237 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/B, priority=13, startTime=1734439107806; duration=0sec 2024-12-17T12:38:28,237 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:28,237 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:B 2024-12-17T12:38:28,237 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:28,240 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:28,240 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/C is initiating minor compaction (all files) 2024-12-17T12:38:28,240 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/C in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:28,240 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/07cfe1eaa6634426b218d472d13168be, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/8d5e5eda76b749e49a8484c7ccfd661b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/78e28aeb891b43e4aca72cd10a0f245f] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=36.1 K 2024-12-17T12:38:28,241 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 07cfe1eaa6634426b218d472d13168be, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1734439102233 2024-12-17T12:38:28,241 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d5e5eda76b749e49a8484c7ccfd661b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1734439102867 2024-12-17T12:38:28,242 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 78e28aeb891b43e4aca72cd10a0f245f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1734439104987 2024-12-17T12:38:28,243 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/4a2d6495838542b9ae7e091fe9d7b22b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4a2d6495838542b9ae7e091fe9d7b22b 2024-12-17T12:38:28,248 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/A of 1e405373380390a8eca5f807f91814d6 into 4a2d6495838542b9ae7e091fe9d7b22b(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:28,248 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:28,248 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/A, priority=13, startTime=1734439107806; duration=0sec 2024-12-17T12:38:28,248 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:28,248 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:A 2024-12-17T12:38:28,266 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#C#compaction#161 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:28,266 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/77ce45967c38417cb302080bbf78ce4e is 50, key is test_row_0/C:col10/1734439104998/Put/seqid=0 2024-12-17T12:38:28,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:28,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-17T12:38:28,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:28,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:28,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:28,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:28,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:28,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:28,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742022_1198 (size=12815) 2024-12-17T12:38:28,285 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/77ce45967c38417cb302080bbf78ce4e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/77ce45967c38417cb302080bbf78ce4e 2024-12-17T12:38:28,286 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:28,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439168282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:28,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:28,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439168284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:28,291 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/C of 1e405373380390a8eca5f807f91814d6 into 77ce45967c38417cb302080bbf78ce4e(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:28,291 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:28,291 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/C, priority=13, startTime=1734439107806; duration=0sec 2024-12-17T12:38:28,291 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:28,291 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:C 2024-12-17T12:38:28,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121720ccf7fd67c54dc08c15b7223238fe04_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439106128/Put/seqid=0 2024-12-17T12:38:28,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742023_1199 (size=17534) 2024-12-17T12:38:28,308 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:28,311 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121720ccf7fd67c54dc08c15b7223238fe04_1e405373380390a8eca5f807f91814d6 to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121720ccf7fd67c54dc08c15b7223238fe04_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:28,313 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/50396e8122874a02845dd108c5652ce5, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:28,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/50396e8122874a02845dd108c5652ce5 is 175, key is test_row_0/A:col10/1734439106128/Put/seqid=0 2024-12-17T12:38:28,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742024_1200 (size=48639) 2024-12-17T12:38:28,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:28,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:28,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439168387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:28,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439168387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:28,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:28,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:28,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439168591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:28,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439168591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:28,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-17T12:38:28,693 INFO [Thread-622 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-12-17T12:38:28,694 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:28,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-12-17T12:38:28,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-17T12:38:28,695 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:28,696 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:28,696 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:28,725 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=282, memsize=44.7 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/50396e8122874a02845dd108c5652ce5 2024-12-17T12:38:28,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/0f24d2b9a1c44be68a9624722f787402 is 50, key is test_row_0/B:col10/1734439106128/Put/seqid=0 2024-12-17T12:38:28,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742025_1201 (size=12301) 2024-12-17T12:38:28,737 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/0f24d2b9a1c44be68a9624722f787402 2024-12-17T12:38:28,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/a6f3d25a21894b56b1570d2d2ced9701 is 50, key is test_row_0/C:col10/1734439106128/Put/seqid=0 2024-12-17T12:38:28,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742026_1202 (size=12301) 2024-12-17T12:38:28,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-17T12:38:28,847 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:28,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-17T12:38:28,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:28,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:28,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:28,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:28,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:28,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:28,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:28,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439168892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:28,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:28,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439168894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:28,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-17T12:38:28,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-17T12:38:29,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:29,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:29,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:29,000 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:29,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:29,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:29,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/a6f3d25a21894b56b1570d2d2ced9701 2024-12-17T12:38:29,152 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-17T12:38:29,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:29,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:29,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:29,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:29,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:29,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
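The pid=46 entries above show the master repeatedly re-dispatching the remote flush procedure while the region reports "NOT flushing ... as already flushing", so each attempt fails with "Unable to complete flush" until the in-flight memstore flush completes. The following is a minimal, self-contained Java sketch of that retry-until-idle pattern; it is an illustration only (the class and method names are invented stand-ins), not the actual HBase FlushRegionCallable/HRegion code.

// Illustrative sketch only -- mimics the pattern visible in the log above: a flush
// request that arrives while another flush is running fails with IOException and is
// simply re-dispatched until the first flush finishes.
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

public class FlushRetrySketch {

    /** Toy stand-in for a region: at most one flush may run at a time. */
    static class ToyRegion {
        private final AtomicBoolean flushing = new AtomicBoolean(false);

        /** Returns true if this call performed the flush, false if one was already running. */
        boolean tryFlush() throws InterruptedException {
            if (!flushing.compareAndSet(false, true)) {
                return false;                      // "NOT flushing ... as already flushing"
            }
            try {
                Thread.sleep(100);                 // pretend to write the store files
                return true;
            } finally {
                flushing.set(false);
            }
        }
    }

    /** Toy stand-in for the remote flush procedure: fail if the region is busy. */
    static void flushRegionProcedure(ToyRegion region) throws IOException, InterruptedException {
        if (!region.tryFlush()) {
            throw new IOException("Unable to complete flush");   // reported back to the master
        }
    }

    public static void main(String[] args) throws Exception {
        ToyRegion region = new ToyRegion();
        Thread background = new Thread(() -> {
            try { region.tryFlush(); } catch (InterruptedException ignored) { }
        });
        background.start();                        // a memstore flush is already in progress
        Thread.sleep(10);

        // The "master" keeps re-dispatching the procedure until it succeeds,
        // like the repeated pid=46 attempts in the log.
        for (int attempt = 1; ; attempt++) {
            try {
                flushRegionProcedure(region);
                System.out.println("flush procedure succeeded on attempt " + attempt);
                break;
            } catch (IOException e) {
                System.out.println("attempt " + attempt + " failed: " + e.getMessage());
                Thread.sleep(50);                  // back off before retrying
            }
        }
        background.join();
    }
}

In the real test run the same back-and-forth is visible as alternating RSRpcServices(3992)/FlushRegionCallable(51) attempts and HMaster(4114) "Remote procedure failed, pid=46" reports until the MemStoreFlusher finishes at sequenceid=282.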
2024-12-17T12:38:29,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/50396e8122874a02845dd108c5652ce5 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/50396e8122874a02845dd108c5652ce5 2024-12-17T12:38:29,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/50396e8122874a02845dd108c5652ce5, entries=250, sequenceid=282, filesize=47.5 K 2024-12-17T12:38:29,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/0f24d2b9a1c44be68a9624722f787402 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/0f24d2b9a1c44be68a9624722f787402 2024-12-17T12:38:29,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/0f24d2b9a1c44be68a9624722f787402, entries=150, sequenceid=282, filesize=12.0 K 2024-12-17T12:38:29,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/a6f3d25a21894b56b1570d2d2ced9701 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/a6f3d25a21894b56b1570d2d2ced9701 2024-12-17T12:38:29,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/a6f3d25a21894b56b1570d2d2ced9701, entries=150, sequenceid=282, filesize=12.0 K 2024-12-17T12:38:29,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 1e405373380390a8eca5f807f91814d6 in 903ms, sequenceid=282, compaction requested=false 2024-12-17T12:38:29,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:29,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-17T12:38:29,304 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-17T12:38:29,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:29,305 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-17T12:38:29,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:29,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:29,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:29,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:29,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:29,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:29,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217aeb8ef476b5046a5a7a717cc5e95cba1_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439108281/Put/seqid=0 2024-12-17T12:38:29,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742027_1203 (size=12454) 2024-12-17T12:38:29,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:29,334 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217aeb8ef476b5046a5a7a717cc5e95cba1_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217aeb8ef476b5046a5a7a717cc5e95cba1_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:29,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/10fe7e50df4b40af93a848990998f998, store: [table=TestAcidGuarantees family=A 
region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:29,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/10fe7e50df4b40af93a848990998f998 is 175, key is test_row_0/A:col10/1734439108281/Put/seqid=0 2024-12-17T12:38:29,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742028_1204 (size=31255) 2024-12-17T12:38:29,342 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=297, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/10fe7e50df4b40af93a848990998f998 2024-12-17T12:38:29,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/638e4bd6bc244a099791dc65968f9bbc is 50, key is test_row_0/B:col10/1734439108281/Put/seqid=0 2024-12-17T12:38:29,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742029_1205 (size=12301) 2024-12-17T12:38:29,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:29,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:29,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:29,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439169419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:29,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439169420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:29,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439169521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:29,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439169524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:29,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439169725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:29,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439169729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,756 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/638e4bd6bc244a099791dc65968f9bbc 2024-12-17T12:38:29,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/51e1cc04c7cf4be78c0710cb74abf08f is 50, key is test_row_0/C:col10/1734439108281/Put/seqid=0 2024-12-17T12:38:29,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742030_1206 (size=12301) 2024-12-17T12:38:29,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:29,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42318 deadline: 1734439169783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,785 DEBUG [Thread-612 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., hostname=681c08bfdbdf,36491,1734439058372, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:38:29,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-17T12:38:29,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:29,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42368 deadline: 1734439169797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,800 DEBUG [Thread-620 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., hostname=681c08bfdbdf,36491,1734439058372, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:38:29,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:29,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42320 deadline: 1734439169812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:29,814 DEBUG [Thread-614 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., hostname=681c08bfdbdf,36491,1734439058372, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at 
org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:38:30,029 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:30,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439170028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:30,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:30,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439170030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:30,166 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/51e1cc04c7cf4be78c0710cb74abf08f 2024-12-17T12:38:30,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/10fe7e50df4b40af93a848990998f998 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/10fe7e50df4b40af93a848990998f998 2024-12-17T12:38:30,174 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/10fe7e50df4b40af93a848990998f998, entries=150, sequenceid=297, filesize=30.5 K 2024-12-17T12:38:30,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/638e4bd6bc244a099791dc65968f9bbc as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/638e4bd6bc244a099791dc65968f9bbc 2024-12-17T12:38:30,179 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/638e4bd6bc244a099791dc65968f9bbc, entries=150, sequenceid=297, filesize=12.0 K 2024-12-17T12:38:30,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/51e1cc04c7cf4be78c0710cb74abf08f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/51e1cc04c7cf4be78c0710cb74abf08f 2024-12-17T12:38:30,184 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/51e1cc04c7cf4be78c0710cb74abf08f, entries=150, sequenceid=297, filesize=12.0 K 2024-12-17T12:38:30,185 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 1e405373380390a8eca5f807f91814d6 in 880ms, sequenceid=297, compaction requested=true 2024-12-17T12:38:30,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:30,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:30,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-17T12:38:30,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-17T12:38:30,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-17T12:38:30,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4900 sec 2024-12-17T12:38:30,188 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.4930 sec 2024-12-17T12:38:30,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:30,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:38:30,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:30,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:30,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:30,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:30,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:30,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:30,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217cdb2339d7b1c4c52b8902de8c85a6383_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439109418/Put/seqid=0 2024-12-17T12:38:30,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742031_1207 (size=17534) 2024-12-17T12:38:30,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:30,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439170548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:30,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:30,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439170548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:30,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:30,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439170650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:30,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:30,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439170650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:30,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-17T12:38:30,799 INFO [Thread-622 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-17T12:38:30,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:30,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-17T12:38:30,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-17T12:38:30,801 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:30,802 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:30,802 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:30,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:30,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439170853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:30,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:30,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439170853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:30,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-17T12:38:30,948 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:30,952 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217cdb2339d7b1c4c52b8902de8c85a6383_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217cdb2339d7b1c4c52b8902de8c85a6383_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:30,952 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/0754388232754aaa86275861fb345cd8, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:30,953 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:30,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/0754388232754aaa86275861fb345cd8 is 175, key is test_row_0/A:col10/1734439109418/Put/seqid=0 2024-12-17T12:38:30,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-17T12:38:30,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:30,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
as already flushing 2024-12-17T12:38:30,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:30,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:30,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:30,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:30,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742032_1208 (size=48639) 2024-12-17T12:38:31,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-17T12:38:31,105 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:31,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-17T12:38:31,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:31,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:31,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:31,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:31,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439171154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:31,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:31,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439171155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:31,257 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:31,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-17T12:38:31,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:31,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:31,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:31,258 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:31,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:31,357 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=321, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/0754388232754aaa86275861fb345cd8 2024-12-17T12:38:31,365 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/23458b861dba423e8becf646769454ec is 50, key is test_row_0/B:col10/1734439109418/Put/seqid=0 2024-12-17T12:38:31,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742033_1209 (size=12301) 2024-12-17T12:38:31,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-17T12:38:31,409 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:31,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-17T12:38:31,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:31,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:31,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:31,410 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:31,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,562 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:31,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-17T12:38:31,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:31,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:31,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:31,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:31,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439171659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:31,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:31,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439171660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:31,714 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:31,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-17T12:38:31,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:31,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:31,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:31,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:31,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/23458b861dba423e8becf646769454ec 2024-12-17T12:38:31,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/dc26afb57abc41bdbcb8813bd35f6c83 is 50, key is test_row_0/C:col10/1734439109418/Put/seqid=0 2024-12-17T12:38:31,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742034_1210 (size=12301) 2024-12-17T12:38:31,866 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:31,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-17T12:38:31,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:31,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:31,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:31,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:31,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:31,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-17T12:38:32,019 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:32,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-17T12:38:32,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:32,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:32,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:32,019 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:32,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:32,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:32,171 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:32,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-17T12:38:32,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:32,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:32,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:32,172 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:32,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:32,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:32,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/dc26afb57abc41bdbcb8813bd35f6c83 2024-12-17T12:38:32,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/0754388232754aaa86275861fb345cd8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/0754388232754aaa86275861fb345cd8 2024-12-17T12:38:32,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/0754388232754aaa86275861fb345cd8, entries=250, sequenceid=321, filesize=47.5 K 2024-12-17T12:38:32,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/23458b861dba423e8becf646769454ec as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/23458b861dba423e8becf646769454ec 2024-12-17T12:38:32,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/23458b861dba423e8becf646769454ec, entries=150, sequenceid=321, filesize=12.0 K 2024-12-17T12:38:32,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/dc26afb57abc41bdbcb8813bd35f6c83 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/dc26afb57abc41bdbcb8813bd35f6c83 2024-12-17T12:38:32,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/dc26afb57abc41bdbcb8813bd35f6c83, entries=150, sequenceid=321, filesize=12.0 K 2024-12-17T12:38:32,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 1e405373380390a8eca5f807f91814d6 in 1666ms, sequenceid=321, compaction requested=true 2024-12-17T12:38:32,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:32,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:32,201 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 
0 compacting, 4 eligible, 16 blocking 2024-12-17T12:38:32,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:32,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:32,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:32,201 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:38:32,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:32,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:32,202 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49718 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:38:32,202 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 160302 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:38:32,203 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/A is initiating minor compaction (all files) 2024-12-17T12:38:32,203 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/B is initiating minor compaction (all files) 2024-12-17T12:38:32,203 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/B in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:32,203 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/A in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:32,203 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4a2d6495838542b9ae7e091fe9d7b22b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/50396e8122874a02845dd108c5652ce5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/10fe7e50df4b40af93a848990998f998, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/0754388232754aaa86275861fb345cd8] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=156.5 K 2024-12-17T12:38:32,203 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b7b31530161a4cd9833f9365d0e22b76, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/0f24d2b9a1c44be68a9624722f787402, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/638e4bd6bc244a099791dc65968f9bbc, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/23458b861dba423e8becf646769454ec] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=48.6 K 2024-12-17T12:38:32,203 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:32,203 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4a2d6495838542b9ae7e091fe9d7b22b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/50396e8122874a02845dd108c5652ce5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/10fe7e50df4b40af93a848990998f998, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/0754388232754aaa86275861fb345cd8] 2024-12-17T12:38:32,203 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting b7b31530161a4cd9833f9365d0e22b76, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1734439104987 2024-12-17T12:38:32,203 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a2d6495838542b9ae7e091fe9d7b22b, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1734439104987 2024-12-17T12:38:32,203 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f24d2b9a1c44be68a9624722f787402, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734439106128 2024-12-17T12:38:32,203 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50396e8122874a02845dd108c5652ce5, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734439106128 2024-12-17T12:38:32,204 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 638e4bd6bc244a099791dc65968f9bbc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1734439108277 2024-12-17T12:38:32,204 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10fe7e50df4b40af93a848990998f998, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1734439108277 2024-12-17T12:38:32,204 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 23458b861dba423e8becf646769454ec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734439109418 2024-12-17T12:38:32,204 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0754388232754aaa86275861fb345cd8, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734439109417 2024-12-17T12:38:32,210 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:32,213 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#B#compaction#172 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:32,213 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/6b6e74df6ce44b39a61933d616eca2d5 is 50, key is test_row_0/B:col10/1734439109418/Put/seqid=0 2024-12-17T12:38:32,216 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412175280bb49b680481a8ce1401f00e3829f_1e405373380390a8eca5f807f91814d6 store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:32,218 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412175280bb49b680481a8ce1401f00e3829f_1e405373380390a8eca5f807f91814d6, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:32,218 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412175280bb49b680481a8ce1401f00e3829f_1e405373380390a8eca5f807f91814d6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:32,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742035_1211 (size=13051) 2024-12-17T12:38:32,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742036_1212 (size=4469) 2024-12-17T12:38:32,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:32,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-17T12:38:32,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:32,324 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-17T12:38:32,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:32,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:32,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:32,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:32,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:32,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:32,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121708247e54dced4a6e83ff1c9e2890a41f_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439110547/Put/seqid=0 2024-12-17T12:38:32,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742037_1213 (size=12454) 2024-12-17T12:38:32,626 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/6b6e74df6ce44b39a61933d616eca2d5 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6b6e74df6ce44b39a61933d616eca2d5 2024-12-17T12:38:32,630 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#A#compaction#171 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:32,630 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/4acae8bf2fd1420f9c858b0930853a36 is 175, key is test_row_0/A:col10/1734439109418/Put/seqid=0 2024-12-17T12:38:32,630 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1e405373380390a8eca5f807f91814d6/B of 1e405373380390a8eca5f807f91814d6 into 6b6e74df6ce44b39a61933d616eca2d5(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:32,631 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:32,631 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/B, priority=12, startTime=1734439112201; duration=0sec 2024-12-17T12:38:32,631 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:32,631 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:B 2024-12-17T12:38:32,631 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:38:32,632 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49718 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:38:32,632 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/C is initiating minor compaction (all files) 2024-12-17T12:38:32,632 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/C in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:32,633 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/77ce45967c38417cb302080bbf78ce4e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/a6f3d25a21894b56b1570d2d2ced9701, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/51e1cc04c7cf4be78c0710cb74abf08f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/dc26afb57abc41bdbcb8813bd35f6c83] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=48.6 K 2024-12-17T12:38:32,633 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 77ce45967c38417cb302080bbf78ce4e, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1734439104987 2024-12-17T12:38:32,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742038_1214 (size=32005) 2024-12-17T12:38:32,634 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting a6f3d25a21894b56b1570d2d2ced9701, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734439106128 2024-12-17T12:38:32,634 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 51e1cc04c7cf4be78c0710cb74abf08f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1734439108277 2024-12-17T12:38:32,634 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting dc26afb57abc41bdbcb8813bd35f6c83, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734439109418 2024-12-17T12:38:32,639 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/4acae8bf2fd1420f9c858b0930853a36 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4acae8bf2fd1420f9c858b0930853a36 2024-12-17T12:38:32,644 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1e405373380390a8eca5f807f91814d6/A of 1e405373380390a8eca5f807f91814d6 into 4acae8bf2fd1420f9c858b0930853a36(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:32,644 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:32,644 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/A, priority=12, startTime=1734439112201; duration=0sec 2024-12-17T12:38:32,644 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:32,644 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:A 2024-12-17T12:38:32,645 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#C#compaction#174 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:32,646 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/9e809a2548c44593bf241ec423af0785 is 50, key is test_row_0/C:col10/1734439109418/Put/seqid=0 2024-12-17T12:38:32,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742039_1215 (size=13051) 2024-12-17T12:38:32,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:32,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:32,673 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/9e809a2548c44593bf241ec423af0785 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/9e809a2548c44593bf241ec423af0785 2024-12-17T12:38:32,680 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1e405373380390a8eca5f807f91814d6/C of 1e405373380390a8eca5f807f91814d6 into 9e809a2548c44593bf241ec423af0785(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:32,680 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:32,680 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/C, priority=12, startTime=1734439112201; duration=0sec 2024-12-17T12:38:32,680 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:32,680 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:C 2024-12-17T12:38:32,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:32,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439172692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:32,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:32,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439172694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:32,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:32,740 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121708247e54dced4a6e83ff1c9e2890a41f_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121708247e54dced4a6e83ff1c9e2890a41f_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:32,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/727affdbb1bf44099287891ef3f19883, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:32,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/727affdbb1bf44099287891ef3f19883 is 175, key is test_row_0/A:col10/1734439110547/Put/seqid=0 2024-12-17T12:38:32,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742040_1216 (size=31255) 2024-12-17T12:38:32,746 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=333, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/727affdbb1bf44099287891ef3f19883 2024-12-17T12:38:32,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 
{event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/420f175ce29b47fc87e70a14f3dc5076 is 50, key is test_row_0/B:col10/1734439110547/Put/seqid=0 2024-12-17T12:38:32,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742041_1217 (size=12301) 2024-12-17T12:38:32,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:32,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439172795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:32,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:32,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439172795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:32,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-17T12:38:32,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:32,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:32,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439172998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:32,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439172998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:33,169 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/420f175ce29b47fc87e70a14f3dc5076 2024-12-17T12:38:33,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/b86c111f18b04a99be38edc9d6a66198 is 50, key is test_row_0/C:col10/1734439110547/Put/seqid=0 2024-12-17T12:38:33,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742042_1218 (size=12301) 2024-12-17T12:38:33,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:33,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439173300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:33,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:33,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439173302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:33,581 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/b86c111f18b04a99be38edc9d6a66198 2024-12-17T12:38:33,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/727affdbb1bf44099287891ef3f19883 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/727affdbb1bf44099287891ef3f19883 2024-12-17T12:38:33,597 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/727affdbb1bf44099287891ef3f19883, entries=150, sequenceid=333, filesize=30.5 K 2024-12-17T12:38:33,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/420f175ce29b47fc87e70a14f3dc5076 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/420f175ce29b47fc87e70a14f3dc5076 2024-12-17T12:38:33,601 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/420f175ce29b47fc87e70a14f3dc5076, entries=150, sequenceid=333, filesize=12.0 K 2024-12-17T12:38:33,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/b86c111f18b04a99be38edc9d6a66198 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/b86c111f18b04a99be38edc9d6a66198 2024-12-17T12:38:33,606 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/b86c111f18b04a99be38edc9d6a66198, entries=150, sequenceid=333, filesize=12.0 K 2024-12-17T12:38:33,608 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1e405373380390a8eca5f807f91814d6 in 1284ms, sequenceid=333, compaction requested=false 2024-12-17T12:38:33,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:33,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:33,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-17T12:38:33,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-17T12:38:33,610 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-17T12:38:33,610 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8070 sec 2024-12-17T12:38:33,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 2.8100 sec 2024-12-17T12:38:33,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:33,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-17T12:38:33,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:33,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:33,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:33,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:33,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:33,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:33,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217b32ea4e78e114b44aeaf94ccd695904a_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439112691/Put/seqid=0 2024-12-17T12:38:33,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:33,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439173818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:33,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:33,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439173820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:33,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742043_1219 (size=14994) 2024-12-17T12:38:33,830 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:33,834 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217b32ea4e78e114b44aeaf94ccd695904a_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217b32ea4e78e114b44aeaf94ccd695904a_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:33,835 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/d3252e5c7e70476ba6a1b5ab7bbf900a, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:33,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/d3252e5c7e70476ba6a1b5ab7bbf900a is 175, key is test_row_0/A:col10/1734439112691/Put/seqid=0 2024-12-17T12:38:33,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742044_1220 (size=39949) 2024-12-17T12:38:33,848 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=361, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/d3252e5c7e70476ba6a1b5ab7bbf900a 2024-12-17T12:38:33,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/8ec0514762084a9292d61233da8ea56c is 50, key is 
test_row_0/B:col10/1734439112691/Put/seqid=0 2024-12-17T12:38:33,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742045_1221 (size=12301) 2024-12-17T12:38:33,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:33,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439173921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:33,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:33,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439173923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:34,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:34,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439174122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:34,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:34,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439174126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:34,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/8ec0514762084a9292d61233da8ea56c 2024-12-17T12:38:34,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/586bc4ac9f114bdbb74b317f4ce1d3d5 is 50, key is test_row_0/C:col10/1734439112691/Put/seqid=0 2024-12-17T12:38:34,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742046_1222 (size=12301) 2024-12-17T12:38:34,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:34,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439174424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:34,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:34,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439174430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:34,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/586bc4ac9f114bdbb74b317f4ce1d3d5 2024-12-17T12:38:34,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/d3252e5c7e70476ba6a1b5ab7bbf900a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d3252e5c7e70476ba6a1b5ab7bbf900a 2024-12-17T12:38:34,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d3252e5c7e70476ba6a1b5ab7bbf900a, entries=200, sequenceid=361, filesize=39.0 K 2024-12-17T12:38:34,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/8ec0514762084a9292d61233da8ea56c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/8ec0514762084a9292d61233da8ea56c 2024-12-17T12:38:34,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/8ec0514762084a9292d61233da8ea56c, entries=150, sequenceid=361, filesize=12.0 K 2024-12-17T12:38:34,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/586bc4ac9f114bdbb74b317f4ce1d3d5 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/586bc4ac9f114bdbb74b317f4ce1d3d5 2024-12-17T12:38:34,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/586bc4ac9f114bdbb74b317f4ce1d3d5, entries=150, sequenceid=361, filesize=12.0 K 2024-12-17T12:38:34,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 1e405373380390a8eca5f807f91814d6 in 887ms, sequenceid=361, compaction requested=true 2024-12-17T12:38:34,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:34,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:34,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:34,692 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:34,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:34,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:34,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:34,692 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:34,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:34,693 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:34,693 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:34,693 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/A is initiating minor compaction (all files) 2024-12-17T12:38:34,693 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/B is initiating minor compaction (all files) 2024-12-17T12:38:34,693 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/A in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:34,693 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/B in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:34,693 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4acae8bf2fd1420f9c858b0930853a36, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/727affdbb1bf44099287891ef3f19883, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d3252e5c7e70476ba6a1b5ab7bbf900a] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=100.8 K 2024-12-17T12:38:34,693 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6b6e74df6ce44b39a61933d616eca2d5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/420f175ce29b47fc87e70a14f3dc5076, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/8ec0514762084a9292d61233da8ea56c] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=36.8 K 2024-12-17T12:38:34,693 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:34,693 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4acae8bf2fd1420f9c858b0930853a36, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/727affdbb1bf44099287891ef3f19883, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d3252e5c7e70476ba6a1b5ab7bbf900a] 2024-12-17T12:38:34,693 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b6e74df6ce44b39a61933d616eca2d5, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734439109418 2024-12-17T12:38:34,693 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4acae8bf2fd1420f9c858b0930853a36, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734439109418 2024-12-17T12:38:34,693 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 420f175ce29b47fc87e70a14f3dc5076, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734439110543 2024-12-17T12:38:34,694 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 727affdbb1bf44099287891ef3f19883, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734439110543 2024-12-17T12:38:34,694 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ec0514762084a9292d61233da8ea56c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1734439112691 2024-12-17T12:38:34,694 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3252e5c7e70476ba6a1b5ab7bbf900a, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1734439112690 2024-12-17T12:38:34,699 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:34,708 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412171fb83321a2ca4567a94ebfee7472d040_1e405373380390a8eca5f807f91814d6 store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:34,709 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#B#compaction#181 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:34,709 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/6d0606f959474d948e86105617d29a03 is 50, key is test_row_0/B:col10/1734439112691/Put/seqid=0 2024-12-17T12:38:34,710 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412171fb83321a2ca4567a94ebfee7472d040_1e405373380390a8eca5f807f91814d6, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:34,710 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412171fb83321a2ca4567a94ebfee7472d040_1e405373380390a8eca5f807f91814d6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:34,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742047_1223 (size=13153) 2024-12-17T12:38:34,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742048_1224 (size=4469) 2024-12-17T12:38:34,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-17T12:38:34,905 INFO [Thread-622 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-17T12:38:34,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:34,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-17T12:38:34,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-17T12:38:34,907 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:34,907 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:34,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:34,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:34,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 
1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-17T12:38:34,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:34,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:34,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:34,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:34,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:34,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:34,936 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217ced54d07237a43c3b883749a43fd1d52_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439113814/Put/seqid=0 2024-12-17T12:38:34,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742049_1225 (size=14994) 2024-12-17T12:38:34,941 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:34,944 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217ced54d07237a43c3b883749a43fd1d52_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217ced54d07237a43c3b883749a43fd1d52_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:34,945 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/e918c70eefae4a71a2ea529f0b452538, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:34,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/e918c70eefae4a71a2ea529f0b452538 is 175, key is test_row_0/A:col10/1734439113814/Put/seqid=0 2024-12-17T12:38:34,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742050_1226 (size=39949) 2024-12-17T12:38:34,949 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=374, memsize=22.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/e918c70eefae4a71a2ea529f0b452538 2024-12-17T12:38:34,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/b1fbe910599f40d7906e2b6b516a31d2 is 50, key is test_row_0/B:col10/1734439113814/Put/seqid=0 2024-12-17T12:38:34,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742051_1227 (size=12301) 2024-12-17T12:38:34,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:34,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439174966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439174966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-17T12:38:35,059 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-17T12:38:35,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:35,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:35,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:35,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:35,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:35,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:35,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:35,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439175070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:35,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439175070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,125 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/6d0606f959474d948e86105617d29a03 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6d0606f959474d948e86105617d29a03 2024-12-17T12:38:35,130 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#A#compaction#180 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:35,130 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/15dcd290250e4c5688c69e615b278539 is 175, key is test_row_0/A:col10/1734439112691/Put/seqid=0 2024-12-17T12:38:35,131 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/B of 1e405373380390a8eca5f807f91814d6 into 6d0606f959474d948e86105617d29a03(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
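The "Region is too busy ... Over memstore limit=512.0 K" warnings above are raised by HRegion.checkResources (visible in the stack traces) once the region's memstore exceeds its blocking threshold, which is the memstore flush size multiplied by the block multiplier; the unusually small 512 K limit suggests this test deliberately shrinks the flush size to force backpressure. A minimal sketch of the two settings involved, assuming an HBase 2.x classpath; the class name and the values are illustrative only and are not taken from this test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBackpressureConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Writes are blocked once the memstore grows past roughly
            // flush.size * block.multiplier (128 K * 4 = 512 K with these values).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking limit ~= "
                + conf.getLong("hbase.hregion.memstore.flush.size", 0)
                  * conf.getInt("hbase.hregion.memstore.block.multiplier", 0) + " bytes");
        }
    }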
2024-12-17T12:38:35,131 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:35,131 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/B, priority=13, startTime=1734439114692; duration=0sec 2024-12-17T12:38:35,131 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:35,131 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:B 2024-12-17T12:38:35,131 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:35,133 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:35,133 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/C is initiating minor compaction (all files) 2024-12-17T12:38:35,133 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/C in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:35,133 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/9e809a2548c44593bf241ec423af0785, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/b86c111f18b04a99be38edc9d6a66198, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/586bc4ac9f114bdbb74b317f4ce1d3d5] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=36.8 K 2024-12-17T12:38:35,133 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e809a2548c44593bf241ec423af0785, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734439109418 2024-12-17T12:38:35,134 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting b86c111f18b04a99be38edc9d6a66198, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734439110543 2024-12-17T12:38:35,134 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 586bc4ac9f114bdbb74b317f4ce1d3d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1734439112691 2024-12-17T12:38:35,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 
is added to blk_1073742052_1228 (size=32107) 2024-12-17T12:38:35,142 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#C#compaction#184 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:35,142 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/e6b726d5ca4241f28267f6036c34c13c is 50, key is test_row_0/C:col10/1734439112691/Put/seqid=0 2024-12-17T12:38:35,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742053_1229 (size=13153) 2024-12-17T12:38:35,156 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/15dcd290250e4c5688c69e615b278539 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/15dcd290250e4c5688c69e615b278539 2024-12-17T12:38:35,160 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/A of 1e405373380390a8eca5f807f91814d6 into 15dcd290250e4c5688c69e615b278539(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:35,160 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:35,160 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/A, priority=13, startTime=1734439114692; duration=0sec 2024-12-17T12:38:35,160 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:35,160 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:A 2024-12-17T12:38:35,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-17T12:38:35,211 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-17T12:38:35,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:35,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:35,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:35,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:35,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:35,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:35,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439175273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:35,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439175273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,363 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-17T12:38:35,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:35,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:35,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:35,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:35,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:35,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
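The pid=49 FlushTableProcedure above was started by an ordinary admin flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"); its pid=50 FlushRegionProcedure keeps failing with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still writing out 1e405373380390a8eca5f807f91814d6, and the master simply re-dispatches the callable until the region can be flushed. A minimal sketch of issuing such a flush from a client, assuming an HBase 2.x client on the classpath; connection details come from the ambient configuration and are not taken from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; the master drives
                // this through a FlushTableProcedure like pid=49 in the log above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }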
2024-12-17T12:38:35,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/b1fbe910599f40d7906e2b6b516a31d2 2024-12-17T12:38:35,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/51f77ae4852942febcd962b96314610a is 50, key is test_row_0/C:col10/1734439113814/Put/seqid=0 2024-12-17T12:38:35,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742054_1230 (size=12301) 2024-12-17T12:38:35,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-17T12:38:35,515 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-17T12:38:35,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:35,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:35,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:35,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:35,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:35,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:35,552 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/e6b726d5ca4241f28267f6036c34c13c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/e6b726d5ca4241f28267f6036c34c13c 2024-12-17T12:38:35,556 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1e405373380390a8eca5f807f91814d6/C of 1e405373380390a8eca5f807f91814d6 into e6b726d5ca4241f28267f6036c34c13c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:35,556 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:35,556 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/C, priority=13, startTime=1734439114692; duration=0sec 2024-12-17T12:38:35,557 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:35,557 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:C 2024-12-17T12:38:35,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:35,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439175576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:35,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439175577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,667 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-17T12:38:35,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
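The rejected Mutate calls above reach clients as RegionTooBusyException (the CallRunner entries show the exception returned for each callId); the standard HBase client will normally retry such calls itself, but a caller that drives its own batching can also back off explicitly. A hypothetical sketch, assuming an HBase 2.x client; the table, row, family and qualifier echo names seen in this log, the cell value and retry policy are arbitrary, and whether the exception surfaces directly like this depends on the client's retry settings:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);               // may be rejected while the memstore is over its limit
                        break;
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) throw e;    // give up after a few tries
                        Thread.sleep(100L * attempt); // simple linear backoff
                    }
                }
            }
        }
    }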
2024-12-17T12:38:35,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:35,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:35,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:35,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:35,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:35,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/51f77ae4852942febcd962b96314610a 2024-12-17T12:38:35,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/e918c70eefae4a71a2ea529f0b452538 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/e918c70eefae4a71a2ea529f0b452538 2024-12-17T12:38:35,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/e918c70eefae4a71a2ea529f0b452538, entries=200, sequenceid=374, filesize=39.0 K 2024-12-17T12:38:35,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/b1fbe910599f40d7906e2b6b516a31d2 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b1fbe910599f40d7906e2b6b516a31d2 2024-12-17T12:38:35,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b1fbe910599f40d7906e2b6b516a31d2, entries=150, sequenceid=374, filesize=12.0 K 2024-12-17T12:38:35,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/51f77ae4852942febcd962b96314610a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/51f77ae4852942febcd962b96314610a 2024-12-17T12:38:35,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/51f77ae4852942febcd962b96314610a, entries=150, sequenceid=374, filesize=12.0 K 2024-12-17T12:38:35,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 1e405373380390a8eca5f807f91814d6 in 863ms, sequenceid=374, compaction requested=false 2024-12-17T12:38:35,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:35,820 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:35,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=50 2024-12-17T12:38:35,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:35,820 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:38:35,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:35,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:35,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:35,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:35,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:35,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:35,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217af4122011bea4023b8462373b0bbd488_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439114961/Put/seqid=0 2024-12-17T12:38:35,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742055_1231 (size=12454) 2024-12-17T12:38:36,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-17T12:38:36,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:36,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. as already flushing 2024-12-17T12:38:36,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:36,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439176092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:36,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:36,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439176093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:36,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:36,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439176194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:36,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:36,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439176197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:36,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:36,234 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217af4122011bea4023b8462373b0bbd488_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217af4122011bea4023b8462373b0bbd488_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:36,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/9ce07be61ddb4828a0ab9b3409ff0990, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:36,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/9ce07be61ddb4828a0ab9b3409ff0990 is 175, key is test_row_0/A:col10/1734439114961/Put/seqid=0 2024-12-17T12:38:36,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742056_1232 (size=31255) 2024-12-17T12:38:36,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:36,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439176397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:36,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:36,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439176401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:36,471 DEBUG [Thread-625 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3bb8b26c to 127.0.0.1:59557 2024-12-17T12:38:36,471 DEBUG [Thread-625 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:36,471 DEBUG [Thread-627 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32239a70 to 127.0.0.1:59557 2024-12-17T12:38:36,471 DEBUG [Thread-627 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:36,472 DEBUG [Thread-629 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6b3821ad to 127.0.0.1:59557 2024-12-17T12:38:36,472 DEBUG [Thread-629 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:36,472 DEBUG [Thread-623 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f6b07e3 to 127.0.0.1:59557 2024-12-17T12:38:36,472 DEBUG [Thread-623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:36,640 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=400, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/9ce07be61ddb4828a0ab9b3409ff0990 2024-12-17T12:38:36,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/cf2ec8cc0a3c423681b71a9b7c8c612d is 50, key is test_row_0/B:col10/1734439114961/Put/seqid=0 2024-12-17T12:38:36,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742057_1233 (size=12301) 2024-12-17T12:38:36,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:36,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439176700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:36,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:36,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439176706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:36,884 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-17T12:38:37,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-17T12:38:37,059 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/cf2ec8cc0a3c423681b71a9b7c8c612d 2024-12-17T12:38:37,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/343881dd273c4c2a8eddba3991d9691d is 50, key is test_row_0/C:col10/1734439114961/Put/seqid=0 2024-12-17T12:38:37,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742058_1234 (size=12301) 2024-12-17T12:38:37,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:37,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42346 deadline: 1734439177204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:37,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:37,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42374 deadline: 1734439177210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:37,478 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/343881dd273c4c2a8eddba3991d9691d 2024-12-17T12:38:37,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/9ce07be61ddb4828a0ab9b3409ff0990 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9ce07be61ddb4828a0ab9b3409ff0990 2024-12-17T12:38:37,494 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9ce07be61ddb4828a0ab9b3409ff0990, entries=150, sequenceid=400, filesize=30.5 K 2024-12-17T12:38:37,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/cf2ec8cc0a3c423681b71a9b7c8c612d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/cf2ec8cc0a3c423681b71a9b7c8c612d 2024-12-17T12:38:37,501 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/cf2ec8cc0a3c423681b71a9b7c8c612d, entries=150, sequenceid=400, filesize=12.0 K 2024-12-17T12:38:37,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/343881dd273c4c2a8eddba3991d9691d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/343881dd273c4c2a8eddba3991d9691d 2024-12-17T12:38:37,508 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/343881dd273c4c2a8eddba3991d9691d, entries=150, sequenceid=400, filesize=12.0 K 2024-12-17T12:38:37,508 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 1e405373380390a8eca5f807f91814d6 in 1688ms, sequenceid=400, compaction requested=true 2024-12-17T12:38:37,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:37,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:37,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-17T12:38:37,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-17T12:38:37,510 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-17T12:38:37,510 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6020 sec 2024-12-17T12:38:37,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.6040 sec 2024-12-17T12:38:38,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:38,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-17T12:38:38,211 DEBUG [Thread-616 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10c964e8 to 127.0.0.1:59557 2024-12-17T12:38:38,211 DEBUG [Thread-616 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:38,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:38,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:38,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:38,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:38,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:38,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:38,219 DEBUG [Thread-618 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c3b736e to 127.0.0.1:59557 2024-12-17T12:38:38,219 DEBUG [Thread-618 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:38,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217635023de6ece490190707f23a27b6415_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439116092/Put/seqid=0 2024-12-17T12:38:38,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742059_1235 (size=12454) 2024-12-17T12:38:38,627 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:38,630 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217635023de6ece490190707f23a27b6415_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217635023de6ece490190707f23a27b6415_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:38,631 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/dc77b5c2c10243debb31ec725972b5bb, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:38,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/dc77b5c2c10243debb31ec725972b5bb is 175, key is test_row_0/A:col10/1734439116092/Put/seqid=0 2024-12-17T12:38:38,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742060_1236 (size=31255) 2024-12-17T12:38:39,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-17T12:38:39,014 INFO [Thread-622 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-17T12:38:39,037 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=414, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/dc77b5c2c10243debb31ec725972b5bb 2024-12-17T12:38:39,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/156f752b2e1a446a99f8ad989cf70731 is 50, key is test_row_0/B:col10/1734439116092/Put/seqid=0 2024-12-17T12:38:39,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742061_1237 (size=12301) 2024-12-17T12:38:39,457 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/156f752b2e1a446a99f8ad989cf70731 2024-12-17T12:38:39,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/aec2c35fa1154b87841aca4e0a467873 is 50, key is test_row_0/C:col10/1734439116092/Put/seqid=0 2024-12-17T12:38:39,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742062_1238 (size=12301) 2024-12-17T12:38:39,840 DEBUG [Thread-620 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:59557 2024-12-17T12:38:39,840 DEBUG [Thread-620 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:39,866 DEBUG [Thread-614 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x048068a5 to 127.0.0.1:59557 2024-12-17T12:38:39,866 DEBUG [Thread-614 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:39,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/aec2c35fa1154b87841aca4e0a467873 2024-12-17T12:38:39,888 DEBUG [Thread-612 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:59557 2024-12-17T12:38:39,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/dc77b5c2c10243debb31ec725972b5bb as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/dc77b5c2c10243debb31ec725972b5bb 2024-12-17T12:38:39,888 DEBUG [Thread-612 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:39,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-17T12:38:39,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-17T12:38:39,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 42 2024-12-17T12:38:39,889 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 110 2024-12-17T12:38:39,889 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 100 2024-12-17T12:38:39,889 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 36 2024-12-17T12:38:39,889 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-17T12:38:39,889 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8493 2024-12-17T12:38:39,889 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8317 2024-12-17T12:38:39,889 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-17T12:38:39,889 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3602 2024-12-17T12:38:39,889 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10806 rows 2024-12-17T12:38:39,889 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3636 2024-12-17T12:38:39,889 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10907 rows 2024-12-17T12:38:39,889 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-17T12:38:39,889 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x083eb3a5 to 127.0.0.1:59557 2024-12-17T12:38:39,889 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:38:39,892 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-17T12:38:39,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-17T12:38:39,894 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:39,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/dc77b5c2c10243debb31ec725972b5bb, entries=150, sequenceid=414, filesize=30.5 K 2024-12-17T12:38:39,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-17T12:38:39,897 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439119896"}]},"ts":"1734439119896"} 2024-12-17T12:38:39,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/156f752b2e1a446a99f8ad989cf70731 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/156f752b2e1a446a99f8ad989cf70731 2024-12-17T12:38:39,898 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-17T12:38:39,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/156f752b2e1a446a99f8ad989cf70731, entries=150, sequenceid=414, filesize=12.0 K 2024-12-17T12:38:39,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/aec2c35fa1154b87841aca4e0a467873 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/aec2c35fa1154b87841aca4e0a467873 2024-12-17T12:38:39,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/aec2c35fa1154b87841aca4e0a467873, entries=150, sequenceid=414, filesize=12.0 K 2024-12-17T12:38:39,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=26.84 KB/27480 for 1e405373380390a8eca5f807f91814d6 in 1696ms, sequenceid=414, compaction requested=true 2024-12-17T12:38:39,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:39,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:39,907 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:38:39,907 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:39,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:39,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:39,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e405373380390a8eca5f807f91814d6:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:39,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:39,907 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:38:39,908 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-17T12:38:39,908 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:38:39,908 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134566 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:38:39,908 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/B is initiating minor compaction (all files) 2024-12-17T12:38:39,908 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 1e405373380390a8eca5f807f91814d6/A is initiating minor compaction (all files) 2024-12-17T12:38:39,908 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/B in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:39,908 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1e405373380390a8eca5f807f91814d6/A in TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
2024-12-17T12:38:39,908 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6d0606f959474d948e86105617d29a03, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b1fbe910599f40d7906e2b6b516a31d2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/cf2ec8cc0a3c423681b71a9b7c8c612d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/156f752b2e1a446a99f8ad989cf70731] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=48.9 K 2024-12-17T12:38:39,908 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/15dcd290250e4c5688c69e615b278539, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/e918c70eefae4a71a2ea529f0b452538, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9ce07be61ddb4828a0ab9b3409ff0990, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/dc77b5c2c10243debb31ec725972b5bb] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp, totalSize=131.4 K 2024-12-17T12:38:39,908 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:39,908 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/15dcd290250e4c5688c69e615b278539, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/e918c70eefae4a71a2ea529f0b452538, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9ce07be61ddb4828a0ab9b3409ff0990, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/dc77b5c2c10243debb31ec725972b5bb] 2024-12-17T12:38:39,909 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-17T12:38:39,909 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d0606f959474d948e86105617d29a03, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1734439112691 2024-12-17T12:38:39,909 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting b1fbe910599f40d7906e2b6b516a31d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1734439113814 2024-12-17T12:38:39,909 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15dcd290250e4c5688c69e615b278539, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1734439112691 2024-12-17T12:38:39,909 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting cf2ec8cc0a3c423681b71a9b7c8c612d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1734439114961 2024-12-17T12:38:39,909 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting e918c70eefae4a71a2ea529f0b452538, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1734439113814 2024-12-17T12:38:39,910 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ce07be61ddb4828a0ab9b3409ff0990, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1734439114961 2024-12-17T12:38:39,910 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 156f752b2e1a446a99f8ad989cf70731, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1734439116091 2024-12-17T12:38:39,910 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1e405373380390a8eca5f807f91814d6, UNASSIGN}] 2024-12-17T12:38:39,910 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc77b5c2c10243debb31ec725972b5bb, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1734439116091 2024-12-17T12:38:39,910 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; 
TransitRegionStateProcedure table=TestAcidGuarantees, region=1e405373380390a8eca5f807f91814d6, UNASSIGN 2024-12-17T12:38:39,911 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=53 updating hbase:meta row=1e405373380390a8eca5f807f91814d6, regionState=CLOSING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:39,912 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-17T12:38:39,912 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; CloseRegionProcedure 1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:38:39,922 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:39,924 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241217c4ed38259fa04726ae87c77abd726510_1e405373380390a8eca5f807f91814d6 store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:39,926 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#B#compaction#192 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:39,927 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/500b9d98b4e94b4da2288defc2c024fd is 50, key is test_row_0/B:col10/1734439116092/Put/seqid=0 2024-12-17T12:38:39,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742063_1239 (size=13289) 2024-12-17T12:38:39,946 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241217c4ed38259fa04726ae87c77abd726510_1e405373380390a8eca5f807f91814d6, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:39,946 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217c4ed38259fa04726ae87c77abd726510_1e405373380390a8eca5f807f91814d6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:39,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742064_1240 (size=4469) 2024-12-17T12:38:39,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-17T12:38:40,063 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:40,063 INFO 
[RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(124): Close 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:40,063 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-17T12:38:40,064 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1681): Closing 1e405373380390a8eca5f807f91814d6, disabling compactions & flushes 2024-12-17T12:38:40,064 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1942): waiting for 2 compactions to complete for region TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:40,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-17T12:38:40,341 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/500b9d98b4e94b4da2288defc2c024fd as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/500b9d98b4e94b4da2288defc2c024fd 2024-12-17T12:38:40,348 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1e405373380390a8eca5f807f91814d6/B of 1e405373380390a8eca5f807f91814d6 into 500b9d98b4e94b4da2288defc2c024fd(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:40,348 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:40,348 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/B, priority=12, startTime=1734439119907; duration=0sec 2024-12-17T12:38:40,348 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:40,348 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:B 2024-12-17T12:38:40,348 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. because compaction request was cancelled 2024-12-17T12:38:40,348 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:C 2024-12-17T12:38:40,350 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e405373380390a8eca5f807f91814d6#A#compaction#193 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:38:40,351 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/9bc764ae0f51438995615fd91fe2641e is 175, key is test_row_0/A:col10/1734439116092/Put/seqid=0 2024-12-17T12:38:40,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742065_1241 (size=32243) 2024-12-17T12:38:40,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-17T12:38:40,760 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/9bc764ae0f51438995615fd91fe2641e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9bc764ae0f51438995615fd91fe2641e 2024-12-17T12:38:40,765 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1e405373380390a8eca5f807f91814d6/A of 1e405373380390a8eca5f807f91814d6 into 9bc764ae0f51438995615fd91fe2641e(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:40,765 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:40,765 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6., storeName=1e405373380390a8eca5f807f91814d6/A, priority=12, startTime=1734439119907; duration=0sec 2024-12-17T12:38:40,765 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:40,766 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 2024-12-17T12:38:40,766 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. after waiting 0 ms 2024-12-17T12:38:40,766 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:40,766 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
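The HRegionFileSystem(442) and HStore(1336) entries above show the commit step of a finished compaction: the rewritten store file is first produced under the region's .tmp directory (.tmp/B/500b9d98b4e94b4da2288defc2c024fd, .tmp/A/9bc764ae0f51438995615fd91fe2641e) and is then moved into the column-family directory proper before the old inputs are retired. A minimal sketch of that commit-by-rename pattern, using only the public Hadoop FileSystem API (the class and method names below are illustrative, not HBase's internal HRegionFileSystem code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch: commit a compacted store file by renaming it out of .tmp.
    public class CommitCompactedFileSketch {
        public static Path commit(Configuration conf, Path regionDir, String family, String fileName)
                throws java.io.IOException {
            FileSystem fs = FileSystem.get(conf);
            // e.g. <region>/.tmp/A/9bc764ae0f51438995615fd91fe2641e
            Path tmpFile = new Path(new Path(new Path(regionDir, ".tmp"), family), fileName);
            // e.g. <region>/A/9bc764ae0f51438995615fd91fe2641e
            Path storeFile = new Path(new Path(regionDir, family), fileName);
            if (!fs.rename(tmpFile, storeFile)) {
                throw new java.io.IOException("Failed to commit " + tmpFile + " as " + storeFile);
            }
            return storeFile;
        }
    }

A rename within the same HDFS filesystem is a metadata operation, which is why the commit is reported immediately after the compacted file finishes writing.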
2024-12-17T12:38:40,766 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e405373380390a8eca5f807f91814d6:A 2024-12-17T12:38:40,766 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(2837): Flushing 1e405373380390a8eca5f807f91814d6 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-17T12:38:40,766 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=A 2024-12-17T12:38:40,766 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:40,766 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=B 2024-12-17T12:38:40,766 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:40,766 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1e405373380390a8eca5f807f91814d6, store=C 2024-12-17T12:38:40,766 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:40,771 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412172eed61b1f8f3402c9056831b91cc7b74_1e405373380390a8eca5f807f91814d6 is 50, key is test_row_0/A:col10/1734439119886/Put/seqid=0 2024-12-17T12:38:40,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742066_1242 (size=12454) 2024-12-17T12:38:40,897 DEBUG [regionserver/681c08bfdbdf:0.Chore.1 {}] throttle.PressureAwareCompactionThroughputController(103): CompactionPressure is 0.07692307692307693, tune throughput to 53.85 MB/second 2024-12-17T12:38:41,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-17T12:38:41,175 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:41,180 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412172eed61b1f8f3402c9056831b91cc7b74_1e405373380390a8eca5f807f91814d6 to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412172eed61b1f8f3402c9056831b91cc7b74_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:41,181 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/30874f17d93344218b7cd8a35ed7ab0d, store: [table=TestAcidGuarantees family=A region=1e405373380390a8eca5f807f91814d6] 2024-12-17T12:38:41,182 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/30874f17d93344218b7cd8a35ed7ab0d is 175, key is test_row_0/A:col10/1734439119886/Put/seqid=0 2024-12-17T12:38:41,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742067_1243 (size=31255) 2024-12-17T12:38:41,588 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=423, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/30874f17d93344218b7cd8a35ed7ab0d 2024-12-17T12:38:41,603 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/b5a49926905d48118e1ecdabaef8573f is 50, key is test_row_0/B:col10/1734439119886/Put/seqid=0 2024-12-17T12:38:41,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742068_1244 (size=12301) 2024-12-17T12:38:42,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-17T12:38:42,009 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/b5a49926905d48118e1ecdabaef8573f 2024-12-17T12:38:42,016 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/f29922c13d3843138756441d5017d864 is 50, key is test_row_0/C:col10/1734439119886/Put/seqid=0 2024-12-17T12:38:42,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742069_1245 (size=12301) 2024-12-17T12:38:42,333 DEBUG 
[master/681c08bfdbdf:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 0747a0e153fecce30e3abad582ed5b21 changed from -1.0 to 0.0, refreshing cache 2024-12-17T12:38:42,421 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/f29922c13d3843138756441d5017d864 2024-12-17T12:38:42,431 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/A/30874f17d93344218b7cd8a35ed7ab0d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/30874f17d93344218b7cd8a35ed7ab0d 2024-12-17T12:38:42,436 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/30874f17d93344218b7cd8a35ed7ab0d, entries=150, sequenceid=423, filesize=30.5 K 2024-12-17T12:38:42,437 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/B/b5a49926905d48118e1ecdabaef8573f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b5a49926905d48118e1ecdabaef8573f 2024-12-17T12:38:42,441 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b5a49926905d48118e1ecdabaef8573f, entries=150, sequenceid=423, filesize=12.0 K 2024-12-17T12:38:42,442 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/.tmp/C/f29922c13d3843138756441d5017d864 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f29922c13d3843138756441d5017d864 2024-12-17T12:38:42,446 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f29922c13d3843138756441d5017d864, entries=150, sequenceid=423, filesize=12.0 K 2024-12-17T12:38:42,446 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(3040): Finished flush of 
dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 1e405373380390a8eca5f807f91814d6 in 1680ms, sequenceid=423, compaction requested=true 2024-12-17T12:38:42,447 DEBUG [StoreCloser-TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f1fa03fe13a14147aca52959d8b3c390, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/089f0a3278734bb8b25017fbd58b0b6b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/07220ee5658241de91d754e6a1572f38, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/cb10450ef86f4e70ab0c5cf616f3ba20, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d20bc21b2a7c47dfaa9ca5a50ef2fc31, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/02fac8aaaf7a4789af0987b31d23bf8e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/6f11a61beae8452b886e894095d512e7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ffba13bb77e54cee8de48fb3dbeb5248, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/58899fd529514c828d2ce92b18473641, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/1edb257536804cb7a461928cc6b86604, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/fccd2ef7e1c34ba38248d4fa85d293fe, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ca6fffae29d3471abb8a558be9fc8b5f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f3fa67265e4d45eca56e78ad73a155cd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/5fadb68a4f4246eda36914464d4bea56, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/3101c90c86dc4031a9bc6b44ec702d5e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9c0bb2d768064a3f974276799654e408, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/bd084fcb284e4e11af809e2d78467437, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4a2d6495838542b9ae7e091fe9d7b22b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f7c80a2e770b4cc5af945bff68ed632c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/50396e8122874a02845dd108c5652ce5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/10fe7e50df4b40af93a848990998f998, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/0754388232754aaa86275861fb345cd8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4acae8bf2fd1420f9c858b0930853a36, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/727affdbb1bf44099287891ef3f19883, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d3252e5c7e70476ba6a1b5ab7bbf900a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/15dcd290250e4c5688c69e615b278539, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/e918c70eefae4a71a2ea529f0b452538, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9ce07be61ddb4828a0ab9b3409ff0990, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/dc77b5c2c10243debb31ec725972b5bb] to archive 2024-12-17T12:38:42,448 DEBUG [StoreCloser-TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
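The StoreCloser entry above and the HFileArchiver entries that follow show how compacted-away store files are retired on region close: rather than being deleted, each file is moved from data/default/TestAcidGuarantees/<region>/<family>/<file> to the mirrored location under archive/data/default/TestAcidGuarantees/<region>/<family>/<file>. A sketch of that path mapping with the public Hadoop FileSystem API (the helper class below is hypothetical, not HBase's HFileArchiver itself):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch: mirror a store file path under archive/ and move it there.
    public class ArchiveStoreFileSketch {
        public static Path archive(FileSystem fs, Path rootDir, Path storeFile) throws java.io.IOException {
            // storeFile: <root>/data/default/<table>/<region>/<cf>/<file>
            // target:    <root>/archive/data/default/<table>/<region>/<cf>/<file>
            String relative = storeFile.toUri().getPath()
                    .substring(rootDir.toUri().getPath().length() + 1);
            Path target = new Path(new Path(rootDir, "archive"), relative);
            fs.mkdirs(target.getParent()); // create archive/<...>/<cf>/ on first use
            if (!fs.rename(storeFile, target)) {
                throw new java.io.IOException("Failed to archive " + storeFile);
            }
            return target;
        }
    }

Keeping the directory layout identical under archive/ is what lets each per-file DEBUG line below pair a source path with its archived counterpart one-to-one.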
2024-12-17T12:38:42,450 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/07220ee5658241de91d754e6a1572f38 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/07220ee5658241de91d754e6a1572f38 2024-12-17T12:38:42,450 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/cb10450ef86f4e70ab0c5cf616f3ba20 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/cb10450ef86f4e70ab0c5cf616f3ba20 2024-12-17T12:38:42,451 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d20bc21b2a7c47dfaa9ca5a50ef2fc31 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d20bc21b2a7c47dfaa9ca5a50ef2fc31 2024-12-17T12:38:42,451 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f1fa03fe13a14147aca52959d8b3c390 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f1fa03fe13a14147aca52959d8b3c390 2024-12-17T12:38:42,451 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/02fac8aaaf7a4789af0987b31d23bf8e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/02fac8aaaf7a4789af0987b31d23bf8e 2024-12-17T12:38:42,451 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/6f11a61beae8452b886e894095d512e7 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/6f11a61beae8452b886e894095d512e7 2024-12-17T12:38:42,452 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/089f0a3278734bb8b25017fbd58b0b6b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/089f0a3278734bb8b25017fbd58b0b6b 2024-12-17T12:38:42,452 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ffba13bb77e54cee8de48fb3dbeb5248 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ffba13bb77e54cee8de48fb3dbeb5248 2024-12-17T12:38:42,453 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/58899fd529514c828d2ce92b18473641 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/58899fd529514c828d2ce92b18473641 2024-12-17T12:38:42,453 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/1edb257536804cb7a461928cc6b86604 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/1edb257536804cb7a461928cc6b86604 2024-12-17T12:38:42,454 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/fccd2ef7e1c34ba38248d4fa85d293fe to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/fccd2ef7e1c34ba38248d4fa85d293fe 2024-12-17T12:38:42,454 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f3fa67265e4d45eca56e78ad73a155cd to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f3fa67265e4d45eca56e78ad73a155cd 2024-12-17T12:38:42,454 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ca6fffae29d3471abb8a558be9fc8b5f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/ca6fffae29d3471abb8a558be9fc8b5f 2024-12-17T12:38:42,454 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9c0bb2d768064a3f974276799654e408 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9c0bb2d768064a3f974276799654e408 2024-12-17T12:38:42,454 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/3101c90c86dc4031a9bc6b44ec702d5e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/3101c90c86dc4031a9bc6b44ec702d5e 2024-12-17T12:38:42,455 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/5fadb68a4f4246eda36914464d4bea56 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/5fadb68a4f4246eda36914464d4bea56 2024-12-17T12:38:42,456 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/bd084fcb284e4e11af809e2d78467437 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/bd084fcb284e4e11af809e2d78467437 2024-12-17T12:38:42,456 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4a2d6495838542b9ae7e091fe9d7b22b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4a2d6495838542b9ae7e091fe9d7b22b 2024-12-17T12:38:42,456 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f7c80a2e770b4cc5af945bff68ed632c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/f7c80a2e770b4cc5af945bff68ed632c 2024-12-17T12:38:42,456 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/50396e8122874a02845dd108c5652ce5 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/50396e8122874a02845dd108c5652ce5 2024-12-17T12:38:42,456 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/0754388232754aaa86275861fb345cd8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/0754388232754aaa86275861fb345cd8 2024-12-17T12:38:42,456 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4acae8bf2fd1420f9c858b0930853a36 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/4acae8bf2fd1420f9c858b0930853a36 2024-12-17T12:38:42,456 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/10fe7e50df4b40af93a848990998f998 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/10fe7e50df4b40af93a848990998f998 2024-12-17T12:38:42,456 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/727affdbb1bf44099287891ef3f19883 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/727affdbb1bf44099287891ef3f19883 2024-12-17T12:38:42,457 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d3252e5c7e70476ba6a1b5ab7bbf900a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/d3252e5c7e70476ba6a1b5ab7bbf900a 2024-12-17T12:38:42,457 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9ce07be61ddb4828a0ab9b3409ff0990 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9ce07be61ddb4828a0ab9b3409ff0990 2024-12-17T12:38:42,457 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/dc77b5c2c10243debb31ec725972b5bb to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/dc77b5c2c10243debb31ec725972b5bb 2024-12-17T12:38:42,457 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/15dcd290250e4c5688c69e615b278539 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/15dcd290250e4c5688c69e615b278539 2024-12-17T12:38:42,458 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/e918c70eefae4a71a2ea529f0b452538 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/e918c70eefae4a71a2ea529f0b452538 2024-12-17T12:38:42,459 DEBUG [StoreCloser-TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/13e88821aaea4729a1411225ae6eaa62, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/96e167275edd49c99fe4e942f0c6f7f8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9287e37cecbc438aa92043a1cdc63c06, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6b1e93bc0f514a1dbd1fe3ca1c977d43, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e5387c23bd174c5597466649ebb0693e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/34c848b094e842388982fe8e38a61c0b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d608a1460634578a1d6e6b18313430f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/7b311827b4364c9eb4e863a04fc2036c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/a38db1cef45f461ba0384cc94378624b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/a6e3078f2bf74bdba37f4d8fa4272d6d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/ed721ab2b3044baebded65c704b0929a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/ce3ba2848f5d44118b569c14662de307, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/64fc166784c644b783411886b1d0159d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e56a1377797f418da043a76cc5a4f9da, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/cd69e0b600eb4c6bb4ef1cc76a0a7584, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d88a4278fdb475ea5f4baddfc2498fd, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/fa3f3d4960c74860b5a1747a44d74edb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b7b31530161a4cd9833f9365d0e22b76, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/2ba0812fff8c4d2aac25ffba353837e6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/0f24d2b9a1c44be68a9624722f787402, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/638e4bd6bc244a099791dc65968f9bbc, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6b6e74df6ce44b39a61933d616eca2d5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/23458b861dba423e8becf646769454ec, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/420f175ce29b47fc87e70a14f3dc5076, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6d0606f959474d948e86105617d29a03, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/8ec0514762084a9292d61233da8ea56c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b1fbe910599f40d7906e2b6b516a31d2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/cf2ec8cc0a3c423681b71a9b7c8c612d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/156f752b2e1a446a99f8ad989cf70731] to archive 2024-12-17T12:38:42,459 DEBUG [StoreCloser-TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-17T12:38:42,461 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/13e88821aaea4729a1411225ae6eaa62 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/13e88821aaea4729a1411225ae6eaa62 2024-12-17T12:38:42,461 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/96e167275edd49c99fe4e942f0c6f7f8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/96e167275edd49c99fe4e942f0c6f7f8 2024-12-17T12:38:42,461 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9287e37cecbc438aa92043a1cdc63c06 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9287e37cecbc438aa92043a1cdc63c06 2024-12-17T12:38:42,461 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/34c848b094e842388982fe8e38a61c0b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/34c848b094e842388982fe8e38a61c0b 2024-12-17T12:38:42,462 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e5387c23bd174c5597466649ebb0693e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e5387c23bd174c5597466649ebb0693e 2024-12-17T12:38:42,462 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/7b311827b4364c9eb4e863a04fc2036c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/7b311827b4364c9eb4e863a04fc2036c 2024-12-17T12:38:42,462 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6b1e93bc0f514a1dbd1fe3ca1c977d43 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6b1e93bc0f514a1dbd1fe3ca1c977d43 2024-12-17T12:38:42,463 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d608a1460634578a1d6e6b18313430f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d608a1460634578a1d6e6b18313430f 2024-12-17T12:38:42,463 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/ce3ba2848f5d44118b569c14662de307 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/ce3ba2848f5d44118b569c14662de307 2024-12-17T12:38:42,463 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/ed721ab2b3044baebded65c704b0929a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/ed721ab2b3044baebded65c704b0929a 2024-12-17T12:38:42,463 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/a38db1cef45f461ba0384cc94378624b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/a38db1cef45f461ba0384cc94378624b 2024-12-17T12:38:42,463 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/64fc166784c644b783411886b1d0159d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/64fc166784c644b783411886b1d0159d 2024-12-17T12:38:42,463 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/a6e3078f2bf74bdba37f4d8fa4272d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/a6e3078f2bf74bdba37f4d8fa4272d6d 2024-12-17T12:38:42,464 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/cd69e0b600eb4c6bb4ef1cc76a0a7584 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/cd69e0b600eb4c6bb4ef1cc76a0a7584 2024-12-17T12:38:42,464 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e56a1377797f418da043a76cc5a4f9da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/e56a1377797f418da043a76cc5a4f9da 2024-12-17T12:38:42,464 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d88a4278fdb475ea5f4baddfc2498fd to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/9d88a4278fdb475ea5f4baddfc2498fd 2024-12-17T12:38:42,465 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b7b31530161a4cd9833f9365d0e22b76 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b7b31530161a4cd9833f9365d0e22b76 2024-12-17T12:38:42,465 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/2ba0812fff8c4d2aac25ffba353837e6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/2ba0812fff8c4d2aac25ffba353837e6 2024-12-17T12:38:42,465 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/fa3f3d4960c74860b5a1747a44d74edb to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/fa3f3d4960c74860b5a1747a44d74edb 2024-12-17T12:38:42,465 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/638e4bd6bc244a099791dc65968f9bbc to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/638e4bd6bc244a099791dc65968f9bbc 2024-12-17T12:38:42,465 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/0f24d2b9a1c44be68a9624722f787402 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/0f24d2b9a1c44be68a9624722f787402 2024-12-17T12:38:42,466 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6b6e74df6ce44b39a61933d616eca2d5 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6b6e74df6ce44b39a61933d616eca2d5 2024-12-17T12:38:42,466 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/420f175ce29b47fc87e70a14f3dc5076 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/420f175ce29b47fc87e70a14f3dc5076 2024-12-17T12:38:42,466 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/23458b861dba423e8becf646769454ec to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/23458b861dba423e8becf646769454ec 2024-12-17T12:38:42,466 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6d0606f959474d948e86105617d29a03 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/6d0606f959474d948e86105617d29a03 2024-12-17T12:38:42,466 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/8ec0514762084a9292d61233da8ea56c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/8ec0514762084a9292d61233da8ea56c 2024-12-17T12:38:42,467 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b1fbe910599f40d7906e2b6b516a31d2 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b1fbe910599f40d7906e2b6b516a31d2 2024-12-17T12:38:42,467 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/cf2ec8cc0a3c423681b71a9b7c8c612d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/cf2ec8cc0a3c423681b71a9b7c8c612d 2024-12-17T12:38:42,467 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/156f752b2e1a446a99f8ad989cf70731 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/156f752b2e1a446a99f8ad989cf70731 2024-12-17T12:38:42,468 DEBUG [StoreCloser-TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/3e76a0c002c84b71b3151ebbf8c2f7d4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/33c8580942c845288485f1dfbb5726cd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f35cbaa06bd448e18e7d5dd03bd4f7a2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/974b0873b5c843fdb81b283228306b4b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/7f47bedaab014415801096fea2c7457d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/aade91c184f14207afe53bc0f3c5535f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/faaa21e695b4415088259581a80445df, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/498c0595ef874f75ba32a7e0566b6195, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/8cdc7729cba94ed081310f255c931d00, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/510c93f247244d98842f0cb584b96d88, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f9a240935ccb477489c29f8cd96cd806, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/eb8ab45c9c6e4bf9b0c1e860bb742837, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/d89964cc17534f1fa1c35a9bc3fb0bd5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/c1c6bd0df0b94f97b1b16d7f4af15f19, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/07cfe1eaa6634426b218d472d13168be, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/eccf3f8b305a4368bdcad4215c1bee1e, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/8d5e5eda76b749e49a8484c7ccfd661b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/77ce45967c38417cb302080bbf78ce4e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/78e28aeb891b43e4aca72cd10a0f245f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/a6f3d25a21894b56b1570d2d2ced9701, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/51e1cc04c7cf4be78c0710cb74abf08f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/9e809a2548c44593bf241ec423af0785, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/dc26afb57abc41bdbcb8813bd35f6c83, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/b86c111f18b04a99be38edc9d6a66198, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/586bc4ac9f114bdbb74b317f4ce1d3d5] to archive 2024-12-17T12:38:42,469 DEBUG [StoreCloser-TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-17T12:38:42,471 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/33c8580942c845288485f1dfbb5726cd to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/33c8580942c845288485f1dfbb5726cd 2024-12-17T12:38:42,471 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f35cbaa06bd448e18e7d5dd03bd4f7a2 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f35cbaa06bd448e18e7d5dd03bd4f7a2 2024-12-17T12:38:42,471 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/3e76a0c002c84b71b3151ebbf8c2f7d4 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/3e76a0c002c84b71b3151ebbf8c2f7d4 2024-12-17T12:38:42,471 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/974b0873b5c843fdb81b283228306b4b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/974b0873b5c843fdb81b283228306b4b 2024-12-17T12:38:42,471 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/aade91c184f14207afe53bc0f3c5535f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/aade91c184f14207afe53bc0f3c5535f 2024-12-17T12:38:42,472 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/faaa21e695b4415088259581a80445df to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/faaa21e695b4415088259581a80445df 2024-12-17T12:38:42,472 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/7f47bedaab014415801096fea2c7457d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/7f47bedaab014415801096fea2c7457d 2024-12-17T12:38:42,472 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/498c0595ef874f75ba32a7e0566b6195 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/498c0595ef874f75ba32a7e0566b6195 2024-12-17T12:38:42,473 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/8cdc7729cba94ed081310f255c931d00 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/8cdc7729cba94ed081310f255c931d00 2024-12-17T12:38:42,473 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/eb8ab45c9c6e4bf9b0c1e860bb742837 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/eb8ab45c9c6e4bf9b0c1e860bb742837 2024-12-17T12:38:42,473 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/d89964cc17534f1fa1c35a9bc3fb0bd5 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/d89964cc17534f1fa1c35a9bc3fb0bd5 2024-12-17T12:38:42,474 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f9a240935ccb477489c29f8cd96cd806 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f9a240935ccb477489c29f8cd96cd806 2024-12-17T12:38:42,474 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/07cfe1eaa6634426b218d472d13168be to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/07cfe1eaa6634426b218d472d13168be 2024-12-17T12:38:42,474 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/c1c6bd0df0b94f97b1b16d7f4af15f19 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/c1c6bd0df0b94f97b1b16d7f4af15f19 2024-12-17T12:38:42,474 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/510c93f247244d98842f0cb584b96d88 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/510c93f247244d98842f0cb584b96d88 2024-12-17T12:38:42,475 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/eccf3f8b305a4368bdcad4215c1bee1e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/eccf3f8b305a4368bdcad4215c1bee1e 2024-12-17T12:38:42,475 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/8d5e5eda76b749e49a8484c7ccfd661b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/8d5e5eda76b749e49a8484c7ccfd661b 2024-12-17T12:38:42,475 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/77ce45967c38417cb302080bbf78ce4e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/77ce45967c38417cb302080bbf78ce4e 2024-12-17T12:38:42,475 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/78e28aeb891b43e4aca72cd10a0f245f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/78e28aeb891b43e4aca72cd10a0f245f 2024-12-17T12:38:42,476 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/a6f3d25a21894b56b1570d2d2ced9701 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/a6f3d25a21894b56b1570d2d2ced9701 2024-12-17T12:38:42,476 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/51e1cc04c7cf4be78c0710cb74abf08f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/51e1cc04c7cf4be78c0710cb74abf08f 2024-12-17T12:38:42,476 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/dc26afb57abc41bdbcb8813bd35f6c83 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/dc26afb57abc41bdbcb8813bd35f6c83 2024-12-17T12:38:42,476 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/9e809a2548c44593bf241ec423af0785 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/9e809a2548c44593bf241ec423af0785 2024-12-17T12:38:42,476 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/b86c111f18b04a99be38edc9d6a66198 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/b86c111f18b04a99be38edc9d6a66198 2024-12-17T12:38:42,477 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/586bc4ac9f114bdbb74b317f4ce1d3d5 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/586bc4ac9f114bdbb74b317f4ce1d3d5 2024-12-17T12:38:42,481 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/recovered.edits/426.seqid, newMaxSeqId=426, maxSeqId=4 2024-12-17T12:38:42,482 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6. 
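The entries above show the region close archiving every compacted store file: each file is moved from the region's data directory to the same relative location under the archive directory. The following is only an illustrative sketch of that move pattern using the public Hadoop FileSystem API, not the actual backup.HFileArchiver code; the paths and configuration are placeholder assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // would point at the test filesystem, e.g. hdfs://localhost:38223
    FileSystem fs = FileSystem.get(conf);

    // Placeholder paths; the log moves files such as
    // .../data/default/TestAcidGuarantees/<region>/B/<storefile>
    // to .../archive/data/default/TestAcidGuarantees/<region>/B/<storefile>
    Path dataFile = new Path("/hbase/data/default/TestAcidGuarantees/region/B/storefile");
    Path archiveFile = new Path("/hbase/archive/data/default/TestAcidGuarantees/region/B/storefile");

    // Create the archive directory, then rename (move) the store file into it,
    // mirroring the "Archived from FileableStoreFile, <src> to <dst>" lines above.
    fs.mkdirs(archiveFile.getParent());
    if (!fs.rename(dataFile, archiveFile)) {
      throw new java.io.IOException("Failed to archive " + dataFile);
    }
    fs.close();
  }
}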
2024-12-17T12:38:42,482 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1635): Region close journal for 1e405373380390a8eca5f807f91814d6: 2024-12-17T12:38:42,483 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(170): Closed 1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:42,484 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=53 updating hbase:meta row=1e405373380390a8eca5f807f91814d6, regionState=CLOSED 2024-12-17T12:38:42,486 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-17T12:38:42,486 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; CloseRegionProcedure 1e405373380390a8eca5f807f91814d6, server=681c08bfdbdf,36491,1734439058372 in 2.5730 sec 2024-12-17T12:38:42,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=52 2024-12-17T12:38:42,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=52, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1e405373380390a8eca5f807f91814d6, UNASSIGN in 2.5760 sec 2024-12-17T12:38:42,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-17T12:38:42,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.5800 sec 2024-12-17T12:38:42,489 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439122489"}]},"ts":"1734439122489"} 2024-12-17T12:38:42,490 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-17T12:38:42,533 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-17T12:38:42,534 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.6400 sec 2024-12-17T12:38:44,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-17T12:38:44,005 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-17T12:38:44,007 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-17T12:38:44,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:44,011 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:44,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-17T12:38:44,013 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=55, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:44,017 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,021 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/recovered.edits] 2024-12-17T12:38:44,026 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9bc764ae0f51438995615fd91fe2641e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/9bc764ae0f51438995615fd91fe2641e 2024-12-17T12:38:44,026 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/30874f17d93344218b7cd8a35ed7ab0d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/A/30874f17d93344218b7cd8a35ed7ab0d 2024-12-17T12:38:44,031 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/500b9d98b4e94b4da2288defc2c024fd to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/500b9d98b4e94b4da2288defc2c024fd 2024-12-17T12:38:44,031 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b5a49926905d48118e1ecdabaef8573f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/B/b5a49926905d48118e1ecdabaef8573f 2024-12-17T12:38:44,036 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/343881dd273c4c2a8eddba3991d9691d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/343881dd273c4c2a8eddba3991d9691d 2024-12-17T12:38:44,036 
DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/51f77ae4852942febcd962b96314610a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/51f77ae4852942febcd962b96314610a 2024-12-17T12:38:44,037 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/e6b726d5ca4241f28267f6036c34c13c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/e6b726d5ca4241f28267f6036c34c13c 2024-12-17T12:38:44,037 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/aec2c35fa1154b87841aca4e0a467873 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/aec2c35fa1154b87841aca4e0a467873 2024-12-17T12:38:44,037 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f29922c13d3843138756441d5017d864 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/C/f29922c13d3843138756441d5017d864 2024-12-17T12:38:44,041 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/recovered.edits/426.seqid to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6/recovered.edits/426.seqid 2024-12-17T12:38:44,041 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,042 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-17T12:38:44,042 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-17T12:38:44,044 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-17T12:38:44,053 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121720ccf7fd67c54dc08c15b7223238fe04_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121720ccf7fd67c54dc08c15b7223238fe04_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,053 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121704363d0ede254c3f882e09f27d722b52_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121704363d0ede254c3f882e09f27d722b52_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,053 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412172dbef429c05c4fc59a315acc820a6764_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412172dbef429c05c4fc59a315acc820a6764_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,053 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412175767eb46f9d7439184904aa73712ac53_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412175767eb46f9d7439184904aa73712ac53_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,053 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121708247e54dced4a6e83ff1c9e2890a41f_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121708247e54dced4a6e83ff1c9e2890a41f_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,054 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217606f6e5a2db943b9943bef8cb1f01ea2_1e405373380390a8eca5f807f91814d6 to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217606f6e5a2db943b9943bef8cb1f01ea2_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,054 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217635023de6ece490190707f23a27b6415_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217635023de6ece490190707f23a27b6415_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,054 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412172eed61b1f8f3402c9056831b91cc7b74_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412172eed61b1f8f3402c9056831b91cc7b74_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,055 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121774b9db08c69740c59cff5b5ad1ed138a_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121774b9db08c69740c59cff5b5ad1ed138a_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,055 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412177b88e5e8b879473c80ed1afc48aaa9d9_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412177b88e5e8b879473c80ed1afc48aaa9d9_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,055 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412179ef86698222a46f29de02e78b910aeff_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412179ef86698222a46f29de02e78b910aeff_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,055 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217aeb8ef476b5046a5a7a717cc5e95cba1_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217aeb8ef476b5046a5a7a717cc5e95cba1_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,055 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217af4122011bea4023b8462373b0bbd488_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217af4122011bea4023b8462373b0bbd488_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,055 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217b32ea4e78e114b44aeaf94ccd695904a_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217b32ea4e78e114b44aeaf94ccd695904a_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,055 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217c13fd1eeafd147f29427520a4ec96e5d_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217c13fd1eeafd147f29427520a4ec96e5d_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,055 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217b5f75e12aff84c1398e6b50f58f92555_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217b5f75e12aff84c1398e6b50f58f92555_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,056 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217c6e631f88d504a56a1362430249838ef_1e405373380390a8eca5f807f91814d6 to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217c6e631f88d504a56a1362430249838ef_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,056 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217dd5b7cf0cf8c43a983ea2f988ebe99f2_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217dd5b7cf0cf8c43a983ea2f988ebe99f2_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,056 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e724601f1c2d4b88bdd8ca638c5c0a8d_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e724601f1c2d4b88bdd8ca638c5c0a8d_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,056 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217ced54d07237a43c3b883749a43fd1d52_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217ced54d07237a43c3b883749a43fd1d52_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,056 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217cdb2339d7b1c4c52b8902de8c85a6383_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217cdb2339d7b1c4c52b8902de8c85a6383_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,056 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f06d1943bffb432ba45e0f9d68317679_1e405373380390a8eca5f807f91814d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f06d1943bffb432ba45e0f9d68317679_1e405373380390a8eca5f807f91814d6 2024-12-17T12:38:44,057 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-17T12:38:44,058 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=55, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:44,060 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-17T12:38:44,064 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-17T12:38:44,065 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=55, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:44,065 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-17T12:38:44,066 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734439124065"}]},"ts":"9223372036854775807"} 2024-12-17T12:38:44,068 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-17T12:38:44,068 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 1e405373380390a8eca5f807f91814d6, NAME => 'TestAcidGuarantees,,1734439093207.1e405373380390a8eca5f807f91814d6.', STARTKEY => '', ENDKEY => ''}] 2024-12-17T12:38:44,068 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
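At this point the log has recorded the whole client-driven teardown: DisableTableProcedure (pid=51) finishes, the client requests a delete, and DeleteTableProcedure (pid=55) archives the region and MOB store files and is now removing the table from hbase:meta. As a minimal, hedged sketch, a client could drive the same disable-then-delete sequence through the public HBase Admin API roughly like this (connection configuration assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml for the test cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        // Disable first (DisableTableProcedure, pid=51 above), then delete
        // (DeleteTableProcedure, pid=55), which archives the region files and
        // removes the table's rows and descriptor from hbase:meta.
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);
        }
        admin.deleteTable(table);
      }
    }
  }
}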
2024-12-17T12:38:44,068 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734439124068"}]},"ts":"9223372036854775807"} 2024-12-17T12:38:44,070 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-17T12:38:44,075 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=55, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:44,076 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 68 msec 2024-12-17T12:38:44,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-17T12:38:44,114 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-17T12:38:44,122 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=249 (was 245) Potentially hanging thread: hconnection-0x3fe77b6d-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-675892322_22 at /127.0.0.1:40006 [Waiting for operation #372] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2077175215_22 at /127.0.0.1:39996 [Waiting for operation #359] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-675892322_22 at /127.0.0.1:39780 [Waiting for operation #446] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3fe77b6d-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3fe77b6d-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/cluster_e84f10d9-83a3-7112-3c7b-cf7e72d3a51d/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/cluster_e84f10d9-83a3-7112-3c7b-cf7e72d3a51d/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2077175215_22 at /127.0.0.1:39862 [Waiting for operation #418] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3fe77b6d-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=460 (was 453) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=270 (was 240) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3803 (was 3910) 2024-12-17T12:38:44,129 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=249, OpenFileDescriptor=460, MaxFileDescriptor=1048576, SystemLoadAverage=270, ProcessCount=11, AvailableMemoryMB=3802 2024-12-17T12:38:44,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
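The TableDescriptorChecker warning above fires because the table sets MEMSTORE_FLUSHSIZE to 131072 bytes (128 KB), far below the 128 MB default of "hbase.hregion.memstore.flush.size", so flushes will trigger very frequently. A minimal sketch of how such a per-table flush size would be declared with the HBase 2.x client API (table and family names taken from the log, everything else illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeDescriptor {
  // A per-table MEMSTORE_FLUSHSIZE overrides hbase.hregion.memstore.flush.size.
  // 131072 bytes (128 KB) matches the value TableDescriptorChecker complains about.
  static TableDescriptor smallFlushSize() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(128L * 1024)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        .build();
  }
}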
2024-12-17T12:38:44,131 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-17T12:38:44,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=56, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-17T12:38:44,132 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=56, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-17T12:38:44,133 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:44,133 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 56 2024-12-17T12:38:44,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-17T12:38:44,134 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=56, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-17T12:38:44,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742070_1246 (size=963) 2024-12-17T12:38:44,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-17T12:38:44,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-17T12:38:44,544 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9 2024-12-17T12:38:44,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742071_1247 (size=53) 2024-12-17T12:38:44,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-17T12:38:44,953 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:38:44,953 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 49a464a6255681856b85f50432ce7984, disabling compactions & flushes 2024-12-17T12:38:44,953 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:44,953 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:44,953 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. after waiting 0 ms 2024-12-17T12:38:44,953 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:44,954 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
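The create request logged above defines three column families (A, B, C), each with VERSIONS => '1' and a 64 KB block size, plus the region-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' that the opened stores later report as compactor=ADAPTIVE. A hedged sketch of building an equivalent descriptor with the standard client API (the same policy can also be set per family via setInMemoryCompaction); connection details are illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Region-level attribute seen in the log; ADAPTIVE selects the
              // adaptive in-memory compaction policy for the CompactingMemStore.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)                                   // VERSIONS => '1'
            .setBlocksize(64 * 1024)                             // BLOCKSIZE => 64 KB
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
        table.setColumnFamily(cf);
      }
      admin.createTable(table.build());
    }
  }
}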
2024-12-17T12:38:44,954 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:44,955 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=56, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-17T12:38:44,955 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734439124955"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734439124955"}]},"ts":"1734439124955"} 2024-12-17T12:38:44,956 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-17T12:38:44,957 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=56, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-17T12:38:44,957 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439124957"}]},"ts":"1734439124957"} 2024-12-17T12:38:44,958 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-17T12:38:45,008 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=49a464a6255681856b85f50432ce7984, ASSIGN}] 2024-12-17T12:38:45,010 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=57, ppid=56, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=49a464a6255681856b85f50432ce7984, ASSIGN 2024-12-17T12:38:45,011 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=57, ppid=56, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=49a464a6255681856b85f50432ce7984, ASSIGN; state=OFFLINE, location=681c08bfdbdf,36491,1734439058372; forceNewPlan=false, retain=false 2024-12-17T12:38:45,162 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=57 updating hbase:meta row=49a464a6255681856b85f50432ce7984, regionState=OPENING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:45,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; OpenRegionProcedure 49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:38:45,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-17T12:38:45,318 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:45,323 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
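Between storing CreateTableProcedure pid=56 and the later client message "Operation: CREATE ... procId: 56 completed", the master repeatedly answers "Checking to see if procedure is done pid=56" while sub-procedures pid=57/58 assign and open the region and the table state in hbase:meta moves from ENABLING to ENABLED. From the client side this amounts to waiting for the table to become available and enabled; a small sketch of that wait, assuming a standard Admin handle and an illustrative polling interval:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class WaitForTable {
  // Polls until the region assigned by pid=57/58 is open and the table state
  // recorded in hbase:meta is ENABLED, roughly the client-side counterpart of
  // the "Checking to see if procedure is done" messages in the log.
  static void waitUntilAvailable(Admin admin, TableName table) throws Exception {
    while (!admin.isTableAvailable(table) || !admin.isTableEnabled(table)) {
      Thread.sleep(200);   // polling cadence is illustrative
    }
  }
}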
2024-12-17T12:38:45,323 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegion(7285): Opening region: {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:38:45,324 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:45,324 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:38:45,324 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegion(7327): checking encryption for 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:45,324 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegion(7330): checking classloading for 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:45,327 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:45,329 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:38:45,330 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 49a464a6255681856b85f50432ce7984 columnFamilyName A 2024-12-17T12:38:45,330 DEBUG [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:45,331 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] regionserver.HStore(327): Store=49a464a6255681856b85f50432ce7984/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:38:45,331 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:45,332 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:38:45,332 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 49a464a6255681856b85f50432ce7984 columnFamilyName B 2024-12-17T12:38:45,332 DEBUG [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:45,333 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] regionserver.HStore(327): Store=49a464a6255681856b85f50432ce7984/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:38:45,333 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:45,334 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:38:45,334 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 49a464a6255681856b85f50432ce7984 columnFamilyName C 2024-12-17T12:38:45,334 DEBUG [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:38:45,334 INFO [StoreOpener-49a464a6255681856b85f50432ce7984-1 {}] regionserver.HStore(327): Store=49a464a6255681856b85f50432ce7984/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:38:45,335 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:45,335 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984 2024-12-17T12:38:45,336 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984 2024-12-17T12:38:45,337 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-17T12:38:45,338 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegion(1085): writing seq id for 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:45,340 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-17T12:38:45,341 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegion(1102): Opened 49a464a6255681856b85f50432ce7984; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74934113, jitterRate=0.11660529673099518}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-17T12:38:45,341 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegion(1001): Region open journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:45,342 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., pid=58, masterSystemTime=1734439125318 2024-12-17T12:38:45,343 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:45,343 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=58}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
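With the region open, the test starts writing; the entries that follow show a flush of all three column families being requested and concurrent Mutate calls being rejected with RegionTooBusyException once the memstore passes its 512 KB blocking limit (four times the 128 KB flush size). A rough client-side sketch of that interaction, assuming retries are tuned low enough for the exception to reach the caller; row, qualifier, value, and backoff figures are illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteAndFlush {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Bursts of puts can exceed the memstore blocking bound; the server then
      // answers RegionTooBusyException and the write is retried after a backoff,
      // giving the in-progress flush time to drain the memstore.
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(100L << attempt);   // simple exponential backoff while the region flushes
        }
      }
      admin.flush(name);   // the kind of request the master turns into a FlushTableProcedure
    }
  }
}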
2024-12-17T12:38:45,344 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=57 updating hbase:meta row=49a464a6255681856b85f50432ce7984, regionState=OPEN, openSeqNum=2, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:45,346 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-17T12:38:45,346 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; OpenRegionProcedure 49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 in 180 msec 2024-12-17T12:38:45,347 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-17T12:38:45,347 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=49a464a6255681856b85f50432ce7984, ASSIGN in 338 msec 2024-12-17T12:38:45,347 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=56, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-17T12:38:45,347 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439125347"}]},"ts":"1734439125347"} 2024-12-17T12:38:45,348 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-17T12:38:45,358 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=56, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-17T12:38:45,359 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2270 sec 2024-12-17T12:38:46,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-17T12:38:46,242 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 56 completed 2024-12-17T12:38:46,246 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75565da1 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@125487ad 2024-12-17T12:38:46,266 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23d94a92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:46,268 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:46,269 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48290, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:46,270 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-17T12:38:46,271 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51640, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-17T12:38:46,274 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x528c21b0 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6453bda3 2024-12-17T12:38:46,283 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d37e661, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:46,285 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d89d666 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@54cec8d1 2024-12-17T12:38:46,292 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66f838c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:46,294 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x44769f38 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@665c4f27 2024-12-17T12:38:46,300 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41457d2b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:46,302 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6cf29c07 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6853d3c9 2024-12-17T12:38:46,309 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29557cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:46,310 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0cfb5a18 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ac01dd5 2024-12-17T12:38:46,317 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5243067f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:46,320 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05118689 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4fab18aa 2024-12-17T12:38:46,326 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23ca9a7d, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:46,327 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d6434c1 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a1d0d8f 2024-12-17T12:38:46,333 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d737bc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:46,334 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x739f6ad6 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@71ec705f 2024-12-17T12:38:46,342 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7747a0c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:46,343 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5caaf139 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e560c7b 2024-12-17T12:38:46,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ddf4c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:46,352 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04506927 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a9b9802 2024-12-17T12:38:46,359 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@118b007e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:38:46,363 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:46,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-17T12:38:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-17T12:38:46,365 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:46,365 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:46,365 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:46,366 DEBUG [hconnection-0x32d4ad3a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:46,366 DEBUG [hconnection-0x4f3ad13f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:46,366 DEBUG [hconnection-0x11535536-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:46,367 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48314, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:46,367 DEBUG [hconnection-0x40a08cff-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:46,367 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48298, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:46,368 DEBUG [hconnection-0x330bd571-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:46,368 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48326, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:46,368 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48328, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:46,368 DEBUG [hconnection-0x3e458b6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:46,369 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48352, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:46,369 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48354, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:46,371 DEBUG [hconnection-0x189c9974-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:46,372 DEBUG [hconnection-0x119f3638-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:46,372 DEBUG [hconnection-0x776ab335-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:46,372 DEBUG [hconnection-0x57865412-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:38:46,372 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48362, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-17T12:38:46,372 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:46,373 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:46,373 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48382, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:38:46,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:46,377 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-17T12:38:46,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:46,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:46,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:46,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:46,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:46,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:46,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439186401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439186403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/5cd31a3c660a4f0aafb8f848f5c6e7e8 is 50, key is test_row_0/A:col10/1734439126377/Put/seqid=0 2024-12-17T12:38:46,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439186406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439186406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439186406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742072_1248 (size=12001) 2024-12-17T12:38:46,438 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/5cd31a3c660a4f0aafb8f848f5c6e7e8 2024-12-17T12:38:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-17T12:38:46,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/d9d4db24b2314cb8964f2ea8d1c0ab25 is 50, key is test_row_0/B:col10/1734439126377/Put/seqid=0 2024-12-17T12:38:46,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742073_1249 (size=12001) 2024-12-17T12:38:46,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/d9d4db24b2314cb8964f2ea8d1c0ab25 2024-12-17T12:38:46,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439186507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439186508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439186511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439186511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439186511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,517 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-17T12:38:46,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:46,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:46,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:46,518 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:46,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:46,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:46,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/95eebe543b14412685a92aeac5787a4e is 50, key is test_row_0/C:col10/1734439126377/Put/seqid=0 2024-12-17T12:38:46,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742074_1250 (size=12001) 2024-12-17T12:38:46,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/95eebe543b14412685a92aeac5787a4e 2024-12-17T12:38:46,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/5cd31a3c660a4f0aafb8f848f5c6e7e8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/5cd31a3c660a4f0aafb8f848f5c6e7e8 2024-12-17T12:38:46,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/5cd31a3c660a4f0aafb8f848f5c6e7e8, entries=150, sequenceid=14, filesize=11.7 K 2024-12-17T12:38:46,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/d9d4db24b2314cb8964f2ea8d1c0ab25 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/d9d4db24b2314cb8964f2ea8d1c0ab25 2024-12-17T12:38:46,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/d9d4db24b2314cb8964f2ea8d1c0ab25, entries=150, sequenceid=14, filesize=11.7 K 2024-12-17T12:38:46,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/95eebe543b14412685a92aeac5787a4e as 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/95eebe543b14412685a92aeac5787a4e 2024-12-17T12:38:46,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/95eebe543b14412685a92aeac5787a4e, entries=150, sequenceid=14, filesize=11.7 K 2024-12-17T12:38:46,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 49a464a6255681856b85f50432ce7984 in 209ms, sequenceid=14, compaction requested=false 2024-12-17T12:38:46,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-17T12:38:46,671 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-17T12:38:46,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:46,671 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:38:46,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:46,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:46,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:46,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:46,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:46,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:46,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/ad49a32f2a2947d99035999fd5d4ffdb is 50, key is test_row_0/A:col10/1734439126403/Put/seqid=0 2024-12-17T12:38:46,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742075_1251 (size=12001) 2024-12-17T12:38:46,693 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/ad49a32f2a2947d99035999fd5d4ffdb 2024-12-17T12:38:46,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/b477db99b9c5475fa709d9c1291e6391 is 50, key is test_row_0/B:col10/1734439126403/Put/seqid=0 2024-12-17T12:38:46,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:46,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:46,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742076_1252 (size=12001) 2024-12-17T12:38:46,715 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/b477db99b9c5475fa709d9c1291e6391 2024-12-17T12:38:46,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439186721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/bb72041219cd4e6a9c1d44ed078a8233 is 50, key is test_row_0/C:col10/1734439126403/Put/seqid=0 2024-12-17T12:38:46,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439186722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439186724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439186725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439186725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742077_1253 (size=12001) 2024-12-17T12:38:46,746 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/bb72041219cd4e6a9c1d44ed078a8233 2024-12-17T12:38:46,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/ad49a32f2a2947d99035999fd5d4ffdb as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/ad49a32f2a2947d99035999fd5d4ffdb 2024-12-17T12:38:46,756 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/ad49a32f2a2947d99035999fd5d4ffdb, entries=150, sequenceid=37, filesize=11.7 K 2024-12-17T12:38:46,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/b477db99b9c5475fa709d9c1291e6391 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/b477db99b9c5475fa709d9c1291e6391 2024-12-17T12:38:46,764 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/b477db99b9c5475fa709d9c1291e6391, entries=150, sequenceid=37, filesize=11.7 K 2024-12-17T12:38:46,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/bb72041219cd4e6a9c1d44ed078a8233 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/bb72041219cd4e6a9c1d44ed078a8233 2024-12-17T12:38:46,772 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/bb72041219cd4e6a9c1d44ed078a8233, entries=150, sequenceid=37, filesize=11.7 K 2024-12-17T12:38:46,773 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 49a464a6255681856b85f50432ce7984 in 101ms, sequenceid=37, compaction requested=false 2024-12-17T12:38:46,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:46,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:46,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-17T12:38:46,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-17T12:38:46,774 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-17T12:38:46,774 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 409 msec 2024-12-17T12:38:46,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 412 msec 2024-12-17T12:38:46,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:46,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-17T12:38:46,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:46,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:46,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:46,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:46,833 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:46,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:46,837 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/2d31e542e7464c38a0ad5bf388457528 is 50, key is test_row_0/A:col10/1734439126830/Put/seqid=0 2024-12-17T12:38:46,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742078_1254 (size=14341) 2024-12-17T12:38:46,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439186842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439186843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439186843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439186844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439186844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439186945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439186945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439186946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439186947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439186947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:46,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-17T12:38:46,967 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-17T12:38:46,968 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:46,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-12-17T12:38:46,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-17T12:38:46,969 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:46,969 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:46,970 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:47,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-17T12:38:47,121 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-17T12:38:47,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
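The entries above ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees", procId 59 completed, pid=61 stored) record an admin-initiated table flush arriving at the master and being turned into a FlushTableProcedure with a FlushRegionProcedure subprocedure. As a rough illustration only (not code from this test), a client produces this kind of request through the HBase Admin API; the table name below is taken from the log, everything else is generic boilerplate:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous table flush request; on the master it shows up as a
      // FlushTableProcedure (e.g. pid=61 above) that fans out one
      // FlushRegionProcedure per region of the table.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```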
2024-12-17T12:38:47,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:47,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:47,122 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
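The "NOT flushing ... as already flushing" / "Unable to complete flush" pair above is the region server declining the second flush request (pid=62) because the region is still mid-flush, while the surrounding RegionTooBusyException warnings show checkResources rejecting puts once the memstore passes its blocking limit (512.0 K in this run; presumably the test's reduced hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). The stock client normally retries these rejected mutations on its own; the sketch below only illustrates explicit backoff for a writer like the test's load threads, assuming retries are configured low enough that the exception actually reaches caller code. The row and column follow the test_row_0 / A:col10 pattern from the log; the value and retry parameters are made up:

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BackoffPutExample {

  // Retries a single Put with exponential backoff while the region reports
  // it is over its memstore blocking limit.
  static void putWithBackoff(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);          // may surface RegionTooBusyException
          return;                  // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs); // give the in-progress flush time to finish
          backoffMs *= 2;
        }
      }
      throw new java.io.IOException("Region still too busy after retries");
    }
  }

  public static void main(String[] args) throws Exception {
    // Row/family/qualifier mirror the log's test_row_0 / A:col10; the value is arbitrary.
    Put put = new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    // Connection setup is omitted; see the Admin flush example above for how one is created.
    // putWithBackoff(conn, put);
  }
}
```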
2024-12-17T12:38:47,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439187149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439187150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439187150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439187150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439187151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/2d31e542e7464c38a0ad5bf388457528 2024-12-17T12:38:47,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/13b6408319b24f0a9ae6d27db53d246c is 50, key is test_row_0/B:col10/1734439126830/Put/seqid=0 2024-12-17T12:38:47,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742079_1255 (size=12001) 2024-12-17T12:38:47,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-17T12:38:47,273 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-17T12:38:47,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:47,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:47,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:47,274 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,426 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-17T12:38:47,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:47,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:47,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:47,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439187452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439187451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439187452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439187453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439187454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-17T12:38:47,578 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-17T12:38:47,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:47,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:47,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:47,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/13b6408319b24f0a9ae6d27db53d246c 2024-12-17T12:38:47,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/9906bd394c1a49929023dc286d5877e6 is 50, key is test_row_0/C:col10/1734439126830/Put/seqid=0 2024-12-17T12:38:47,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742080_1256 (size=12001) 2024-12-17T12:38:47,731 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-17T12:38:47,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:47,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:47,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:47,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,883 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-17T12:38:47,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:47,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:47,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:47,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:47,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439187957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439187957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439187958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439187959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:47,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:47,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439187961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:48,004 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-17T12:38:48,035 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:48,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-17T12:38:48,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:48,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:48,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:48,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:48,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:48,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:48,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/9906bd394c1a49929023dc286d5877e6 2024-12-17T12:38:48,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/2d31e542e7464c38a0ad5bf388457528 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2d31e542e7464c38a0ad5bf388457528 2024-12-17T12:38:48,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-17T12:38:48,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2d31e542e7464c38a0ad5bf388457528, entries=200, sequenceid=55, filesize=14.0 K 2024-12-17T12:38:48,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/13b6408319b24f0a9ae6d27db53d246c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/13b6408319b24f0a9ae6d27db53d246c 2024-12-17T12:38:48,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/13b6408319b24f0a9ae6d27db53d246c, entries=150, sequenceid=55, filesize=11.7 K 2024-12-17T12:38:48,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/9906bd394c1a49929023dc286d5877e6 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/9906bd394c1a49929023dc286d5877e6 2024-12-17T12:38:48,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/9906bd394c1a49929023dc286d5877e6, entries=150, sequenceid=55, filesize=11.7 K 2024-12-17T12:38:48,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 49a464a6255681856b85f50432ce7984 in 1250ms, sequenceid=55, compaction requested=true 2024-12-17T12:38:48,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:48,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:48,082 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:48,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:48,083 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:48,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:48,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:48,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:48,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:48,084 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:48,084 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/A is initiating minor compaction (all files) 2024-12-17T12:38:48,084 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/A in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:48,084 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/5cd31a3c660a4f0aafb8f848f5c6e7e8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/ad49a32f2a2947d99035999fd5d4ffdb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2d31e542e7464c38a0ad5bf388457528] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=37.4 K 2024-12-17T12:38:48,084 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:48,084 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/B is initiating minor compaction (all files) 2024-12-17T12:38:48,084 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/B in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:48,084 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5cd31a3c660a4f0aafb8f848f5c6e7e8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734439126373 2024-12-17T12:38:48,084 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/d9d4db24b2314cb8964f2ea8d1c0ab25, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/b477db99b9c5475fa709d9c1291e6391, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/13b6408319b24f0a9ae6d27db53d246c] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=35.2 K 2024-12-17T12:38:48,084 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad49a32f2a2947d99035999fd5d4ffdb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734439126397 2024-12-17T12:38:48,085 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d9d4db24b2314cb8964f2ea8d1c0ab25, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734439126373 2024-12-17T12:38:48,085 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d31e542e7464c38a0ad5bf388457528, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439126721 2024-12-17T12:38:48,085 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] 
compactions.Compactor(224): Compacting b477db99b9c5475fa709d9c1291e6391, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734439126397 2024-12-17T12:38:48,085 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 13b6408319b24f0a9ae6d27db53d246c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439126721 2024-12-17T12:38:48,092 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#B#compaction#206 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:48,092 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/6229fbeb51eb4275b4e7f73e20e2494a is 50, key is test_row_0/B:col10/1734439126830/Put/seqid=0 2024-12-17T12:38:48,094 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#A#compaction#207 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:48,095 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/7ca4a23ffa1e4930b7a301063f313bfa is 50, key is test_row_0/A:col10/1734439126830/Put/seqid=0 2024-12-17T12:38:48,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742081_1257 (size=12104) 2024-12-17T12:38:48,120 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/6229fbeb51eb4275b4e7f73e20e2494a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/6229fbeb51eb4275b4e7f73e20e2494a 2024-12-17T12:38:48,124 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/B of 49a464a6255681856b85f50432ce7984 into 6229fbeb51eb4275b4e7f73e20e2494a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:48,124 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:48,124 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/B, priority=13, startTime=1734439128082; duration=0sec 2024-12-17T12:38:48,124 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:48,124 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:B 2024-12-17T12:38:48,125 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:48,125 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:48,126 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/C is initiating minor compaction (all files) 2024-12-17T12:38:48,126 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/C in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:48,126 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/95eebe543b14412685a92aeac5787a4e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/bb72041219cd4e6a9c1d44ed078a8233, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/9906bd394c1a49929023dc286d5877e6] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=35.2 K 2024-12-17T12:38:48,126 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 95eebe543b14412685a92aeac5787a4e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734439126373 2024-12-17T12:38:48,127 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting bb72041219cd4e6a9c1d44ed078a8233, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734439126397 2024-12-17T12:38:48,127 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 9906bd394c1a49929023dc286d5877e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439126721 2024-12-17T12:38:48,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is 
added to blk_1073742082_1258 (size=12104) 2024-12-17T12:38:48,141 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/7ca4a23ffa1e4930b7a301063f313bfa as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/7ca4a23ffa1e4930b7a301063f313bfa 2024-12-17T12:38:48,147 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/A of 49a464a6255681856b85f50432ce7984 into 7ca4a23ffa1e4930b7a301063f313bfa(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:48,147 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:48,147 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/A, priority=13, startTime=1734439128082; duration=0sec 2024-12-17T12:38:48,148 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:48,148 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:A 2024-12-17T12:38:48,149 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#C#compaction#208 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:48,149 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/1f1199763c634d36885e8853a89f2366 is 50, key is test_row_0/C:col10/1734439126830/Put/seqid=0 2024-12-17T12:38:48,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742083_1259 (size=12104) 2024-12-17T12:38:48,188 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:48,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-17T12:38:48,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:48,189 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-17T12:38:48,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:48,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:48,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:48,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:48,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:48,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:48,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/c188df83cf804cd8b2584aaa40dfdf09 is 50, key is test_row_0/A:col10/1734439126843/Put/seqid=0 2024-12-17T12:38:48,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742084_1260 (size=12001) 2024-12-17T12:38:48,202 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/c188df83cf804cd8b2584aaa40dfdf09 2024-12-17T12:38:48,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/510998c3a05743e2a6ddfa67bc44231c is 50, key is test_row_0/B:col10/1734439126843/Put/seqid=0 2024-12-17T12:38:48,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742085_1261 (size=12001) 2024-12-17T12:38:48,222 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=76 (bloomFilter=true), 
to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/510998c3a05743e2a6ddfa67bc44231c 2024-12-17T12:38:48,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/7820923e94c8497bb2d11e679d162f91 is 50, key is test_row_0/C:col10/1734439126843/Put/seqid=0 2024-12-17T12:38:48,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742086_1262 (size=12001) 2024-12-17T12:38:48,246 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/7820923e94c8497bb2d11e679d162f91 2024-12-17T12:38:48,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/c188df83cf804cd8b2584aaa40dfdf09 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/c188df83cf804cd8b2584aaa40dfdf09 2024-12-17T12:38:48,259 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/c188df83cf804cd8b2584aaa40dfdf09, entries=150, sequenceid=76, filesize=11.7 K 2024-12-17T12:38:48,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/510998c3a05743e2a6ddfa67bc44231c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/510998c3a05743e2a6ddfa67bc44231c 2024-12-17T12:38:48,267 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/510998c3a05743e2a6ddfa67bc44231c, entries=150, sequenceid=76, filesize=11.7 K 2024-12-17T12:38:48,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/7820923e94c8497bb2d11e679d162f91 as 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/7820923e94c8497bb2d11e679d162f91 2024-12-17T12:38:48,276 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/7820923e94c8497bb2d11e679d162f91, entries=150, sequenceid=76, filesize=11.7 K 2024-12-17T12:38:48,277 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for 49a464a6255681856b85f50432ce7984 in 89ms, sequenceid=76, compaction requested=false 2024-12-17T12:38:48,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:48,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:48,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-17T12:38:48,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-17T12:38:48,279 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-17T12:38:48,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3080 sec 2024-12-17T12:38:48,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.3120 sec 2024-12-17T12:38:48,558 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/1f1199763c634d36885e8853a89f2366 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/1f1199763c634d36885e8853a89f2366 2024-12-17T12:38:48,562 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/C of 49a464a6255681856b85f50432ce7984 into 1f1199763c634d36885e8853a89f2366(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:48,562 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:48,562 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/C, priority=13, startTime=1734439128083; duration=0sec 2024-12-17T12:38:48,562 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:48,562 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:C 2024-12-17T12:38:48,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:48,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-17T12:38:48,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:48,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:48,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:48,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:48,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:48,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:48,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/a343c1ac3ad5466dbd5b5461c66619bc is 50, key is test_row_0/A:col10/1734439128971/Put/seqid=0 2024-12-17T12:38:48,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742087_1263 (size=14341) 2024-12-17T12:38:48,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/a343c1ac3ad5466dbd5b5461c66619bc 2024-12-17T12:38:48,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/7821ab9463f748d98f5072cd4c44b924 is 50, key is test_row_0/B:col10/1734439128971/Put/seqid=0 2024-12-17T12:38:48,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:48,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439188987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:48,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:48,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439188989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:48,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:48,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439188990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:48,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:48,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:48,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439188990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:48,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439188990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:48,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742088_1264 (size=12001) 2024-12-17T12:38:48,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/7821ab9463f748d98f5072cd4c44b924 
2024-12-17T12:38:49,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/a46096929c384d82b84eb17d98dbc4d5 is 50, key is test_row_0/C:col10/1734439128971/Put/seqid=0 2024-12-17T12:38:49,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742089_1265 (size=12001) 2024-12-17T12:38:49,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-17T12:38:49,073 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-17T12:38:49,074 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:49,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-12-17T12:38:49,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-17T12:38:49,075 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:49,075 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:49,075 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:49,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439189091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439189094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439189094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439189095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439189095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-17T12:38:49,226 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-17T12:38:49,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:49,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:49,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:49,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:49,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:49,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:49,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439189295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439189296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439189297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439189297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439189297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-17T12:38:49,379 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-17T12:38:49,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:49,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:49,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:49,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:49,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:49,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:49,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/a46096929c384d82b84eb17d98dbc4d5 2024-12-17T12:38:49,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/a343c1ac3ad5466dbd5b5461c66619bc as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/a343c1ac3ad5466dbd5b5461c66619bc 2024-12-17T12:38:49,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/a343c1ac3ad5466dbd5b5461c66619bc, entries=200, sequenceid=89, filesize=14.0 K 2024-12-17T12:38:49,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/7821ab9463f748d98f5072cd4c44b924 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/7821ab9463f748d98f5072cd4c44b924 2024-12-17T12:38:49,426 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/7821ab9463f748d98f5072cd4c44b924, entries=150, sequenceid=89, filesize=11.7 K 2024-12-17T12:38:49,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/a46096929c384d82b84eb17d98dbc4d5 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a46096929c384d82b84eb17d98dbc4d5 2024-12-17T12:38:49,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a46096929c384d82b84eb17d98dbc4d5, entries=150, sequenceid=89, filesize=11.7 K 2024-12-17T12:38:49,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 49a464a6255681856b85f50432ce7984 in 459ms, sequenceid=89, compaction requested=true 2024-12-17T12:38:49,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:49,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:A, 
priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:49,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:49,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:49,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:49,430 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:49,430 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:49,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:49,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:49,431 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:49,431 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:49,431 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/A is initiating minor compaction (all files) 2024-12-17T12:38:49,431 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/B is initiating minor compaction (all files) 2024-12-17T12:38:49,431 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/A in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:49,431 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/B in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:49,431 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/7ca4a23ffa1e4930b7a301063f313bfa, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/c188df83cf804cd8b2584aaa40dfdf09, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/a343c1ac3ad5466dbd5b5461c66619bc] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=37.5 K 2024-12-17T12:38:49,432 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/6229fbeb51eb4275b4e7f73e20e2494a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/510998c3a05743e2a6ddfa67bc44231c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/7821ab9463f748d98f5072cd4c44b924] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=35.3 K 2024-12-17T12:38:49,432 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ca4a23ffa1e4930b7a301063f313bfa, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439126721 2024-12-17T12:38:49,432 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 6229fbeb51eb4275b4e7f73e20e2494a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439126721 2024-12-17T12:38:49,432 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting c188df83cf804cd8b2584aaa40dfdf09, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734439126843 2024-12-17T12:38:49,432 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 510998c3a05743e2a6ddfa67bc44231c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734439126843 2024-12-17T12:38:49,432 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting a343c1ac3ad5466dbd5b5461c66619bc, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734439128968 2024-12-17T12:38:49,433 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7821ab9463f748d98f5072cd4c44b924, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734439128970 2024-12-17T12:38:49,438 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#B#compaction#215 average throughput is unlimited, slept 0 time(s) and total slept 
time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:49,439 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#A#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:49,439 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/9cb43f670c5542e59b2d9ce8712c6892 is 50, key is test_row_0/B:col10/1734439128971/Put/seqid=0 2024-12-17T12:38:49,439 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/3a35f82678d344c6931b1473be1c80e8 is 50, key is test_row_0/A:col10/1734439128971/Put/seqid=0 2024-12-17T12:38:49,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742090_1266 (size=12207) 2024-12-17T12:38:49,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742091_1267 (size=12207) 2024-12-17T12:38:49,531 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-17T12:38:49,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:49,532 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-17T12:38:49,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:49,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:49,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:49,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:49,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:49,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:49,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/79bc3398f3384b6ca968c00e4800cd78 is 50, key is test_row_0/A:col10/1734439128986/Put/seqid=0 2024-12-17T12:38:49,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742092_1268 (size=12001) 2024-12-17T12:38:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:49,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:49,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439189603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439189603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439189605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439189606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439189606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-17T12:38:49,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439189707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439189707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439189708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439189708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439189709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,849 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/9cb43f670c5542e59b2d9ce8712c6892 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/9cb43f670c5542e59b2d9ce8712c6892 2024-12-17T12:38:49,853 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/B of 49a464a6255681856b85f50432ce7984 into 9cb43f670c5542e59b2d9ce8712c6892(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:49,853 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:49,853 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/B, priority=13, startTime=1734439129430; duration=0sec 2024-12-17T12:38:49,853 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:49,853 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:B 2024-12-17T12:38:49,854 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:49,854 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:49,854 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/C is initiating minor compaction (all files) 2024-12-17T12:38:49,855 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/C in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:49,855 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/1f1199763c634d36885e8853a89f2366, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/7820923e94c8497bb2d11e679d162f91, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a46096929c384d82b84eb17d98dbc4d5] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=35.3 K 2024-12-17T12:38:49,855 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f1199763c634d36885e8853a89f2366, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439126721 2024-12-17T12:38:49,855 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7820923e94c8497bb2d11e679d162f91, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734439126843 2024-12-17T12:38:49,856 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting a46096929c384d82b84eb17d98dbc4d5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734439128970 2024-12-17T12:38:49,856 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/3a35f82678d344c6931b1473be1c80e8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/3a35f82678d344c6931b1473be1c80e8 2024-12-17T12:38:49,860 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/A of 49a464a6255681856b85f50432ce7984 into 3a35f82678d344c6931b1473be1c80e8(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:49,860 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:49,860 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/A, priority=13, startTime=1734439129430; duration=0sec 2024-12-17T12:38:49,860 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:49,860 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:A 2024-12-17T12:38:49,861 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#C#compaction#218 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:49,862 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/2f82e69a811a4211ab1ec06115470215 is 50, key is test_row_0/C:col10/1734439128971/Put/seqid=0 2024-12-17T12:38:49,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742093_1269 (size=12207) 2024-12-17T12:38:49,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439189909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439189909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439189911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439189911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:49,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439189912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:49,940 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/79bc3398f3384b6ca968c00e4800cd78 2024-12-17T12:38:49,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/0e85790e318d49af9219b9699cf7177d is 50, key is test_row_0/B:col10/1734439128986/Put/seqid=0 2024-12-17T12:38:49,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742094_1270 (size=12001) 2024-12-17T12:38:49,950 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/0e85790e318d49af9219b9699cf7177d 2024-12-17T12:38:49,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/e3e15aaabfed45bc952d9d83bf4a12d6 is 50, key is test_row_0/C:col10/1734439128986/Put/seqid=0 2024-12-17T12:38:49,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742095_1271 (size=12001) 2024-12-17T12:38:50,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 
2024-12-17T12:38:50,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439190212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439190212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439190213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439190214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439190214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,269 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/2f82e69a811a4211ab1ec06115470215 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/2f82e69a811a4211ab1ec06115470215 2024-12-17T12:38:50,273 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/C of 49a464a6255681856b85f50432ce7984 into 2f82e69a811a4211ab1ec06115470215(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:50,273 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:50,273 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/C, priority=13, startTime=1734439129430; duration=0sec 2024-12-17T12:38:50,273 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:50,274 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:C 2024-12-17T12:38:50,360 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/e3e15aaabfed45bc952d9d83bf4a12d6 2024-12-17T12:38:50,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/79bc3398f3384b6ca968c00e4800cd78 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/79bc3398f3384b6ca968c00e4800cd78 2024-12-17T12:38:50,367 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/79bc3398f3384b6ca968c00e4800cd78, entries=150, sequenceid=113, filesize=11.7 K 2024-12-17T12:38:50,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/0e85790e318d49af9219b9699cf7177d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0e85790e318d49af9219b9699cf7177d 2024-12-17T12:38:50,371 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0e85790e318d49af9219b9699cf7177d, entries=150, sequenceid=113, filesize=11.7 K 2024-12-17T12:38:50,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/e3e15aaabfed45bc952d9d83bf4a12d6 as 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/e3e15aaabfed45bc952d9d83bf4a12d6 2024-12-17T12:38:50,376 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/e3e15aaabfed45bc952d9d83bf4a12d6, entries=150, sequenceid=113, filesize=11.7 K 2024-12-17T12:38:50,376 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 49a464a6255681856b85f50432ce7984 in 844ms, sequenceid=113, compaction requested=false 2024-12-17T12:38:50,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:50,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:50,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-12-17T12:38:50,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-12-17T12:38:50,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-17T12:38:50,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3020 sec 2024-12-17T12:38:50,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 1.3050 sec 2024-12-17T12:38:50,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:50,716 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-17T12:38:50,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:50,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:50,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:50,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:50,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:50,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:50,724 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/1884618863f44588b6d2a6cfef9e2c26 is 50, key is test_row_0/A:col10/1734439129602/Put/seqid=0 2024-12-17T12:38:50,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742096_1272 (size=12051) 2024-12-17T12:38:50,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439190731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439190731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439190732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439190733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439190734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439190835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439190835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439190836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439190837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:50,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:50,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439190837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439191038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439191038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439191038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439191039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439191040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/1884618863f44588b6d2a6cfef9e2c26 2024-12-17T12:38:51,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/10979d10a72f4cf5bb6fedb469812f86 is 50, key is test_row_0/B:col10/1734439129602/Put/seqid=0 2024-12-17T12:38:51,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742097_1273 (size=12051) 2024-12-17T12:38:51,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-17T12:38:51,178 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-17T12:38:51,179 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:51,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-12-17T12:38:51,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-17T12:38:51,180 INFO [PEWorker-2 {}] 
procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:51,180 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:51,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:51,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-17T12:38:51,331 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-17T12:38:51,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:51,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:51,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:51,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
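The pid=65/pid=66 entries above are the master-side FlushTableProcedure and its FlushRegionProcedure sub-procedure; the region server rejects the remote flush with "Unable to complete flush" because the region is already flushing, and the master keeps re-dispatching the sub-procedure. As a minimal, illustrative Java sketch (not taken from the test source), this is how a client such as the test harness could request that table flush through the public Admin API; only the table name comes from the log, the connection setup is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush request to the master (the FlushTableProcedure, pid=65,
      // above); the master dispatches FlushRegionProcedure sub-procedures
      // (pid=66) to the region servers and retries them until the region
      // is free to flush.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}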
2024-12-17T12:38:51,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:51,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:51,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439191340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439191341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439191342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439191342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439191343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-17T12:38:51,484 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-17T12:38:51,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:51,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:51,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:51,484 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:51,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:51,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:51,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/10979d10a72f4cf5bb6fedb469812f86 2024-12-17T12:38:51,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/73417b57c2cf4fa1b95dfc4a7234e55b is 50, key is test_row_0/C:col10/1734439129602/Put/seqid=0 2024-12-17T12:38:51,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742098_1274 (size=12051) 2024-12-17T12:38:51,636 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-17T12:38:51,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:51,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:51,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:51,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:51,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:51,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:51,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-17T12:38:51,789 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-17T12:38:51,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:51,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:51,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:51,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:51,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:51,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:51,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439191842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439191845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439191846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439191848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:51,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439191848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,941 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:51,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-17T12:38:51,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:51,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:51,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:51,944 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
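The repeated RegionTooBusyException entries ("Over memstore limit=512.0 K") are raised by HRegion.checkResources(), which rejects writes while the region's memstore is above its blocking size until the in-flight flush drains it. The blocking size is the configured memstore flush size times the block multiplier, so the 512 K ceiling seen here reflects a deliberately small test configuration; the concrete values in the sketch below are an assumption chosen only to reproduce that product:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class MemstoreLimitSketch {
  static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches ~128 KB ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // ... and block new writes (RegionTooBusyException) once the memstore
    // grows past multiplier * flush size = 4 * 128 KB = 512 KB.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}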
2024-12-17T12:38:51,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:51,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:51,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/73417b57c2cf4fa1b95dfc4a7234e55b 2024-12-17T12:38:51,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/1884618863f44588b6d2a6cfef9e2c26 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1884618863f44588b6d2a6cfef9e2c26 2024-12-17T12:38:51,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1884618863f44588b6d2a6cfef9e2c26, entries=150, sequenceid=129, filesize=11.8 K 2024-12-17T12:38:51,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/10979d10a72f4cf5bb6fedb469812f86 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/10979d10a72f4cf5bb6fedb469812f86 2024-12-17T12:38:51,960 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/10979d10a72f4cf5bb6fedb469812f86, entries=150, sequenceid=129, filesize=11.8 K 2024-12-17T12:38:51,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/73417b57c2cf4fa1b95dfc4a7234e55b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/73417b57c2cf4fa1b95dfc4a7234e55b 2024-12-17T12:38:51,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/73417b57c2cf4fa1b95dfc4a7234e55b, entries=150, sequenceid=129, filesize=11.8 K 2024-12-17T12:38:51,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 49a464a6255681856b85f50432ce7984 in 1249ms, sequenceid=129, compaction requested=true 2024-12-17T12:38:51,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:51,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
49a464a6255681856b85f50432ce7984:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:51,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:51,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:51,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:51,965 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:51,965 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:51,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:51,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:51,966 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:51,966 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/A is initiating minor compaction (all files) 2024-12-17T12:38:51,966 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/A in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:51,966 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/3a35f82678d344c6931b1473be1c80e8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/79bc3398f3384b6ca968c00e4800cd78, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1884618863f44588b6d2a6cfef9e2c26] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=35.4 K 2024-12-17T12:38:51,966 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:51,966 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/B is initiating minor compaction (all files) 2024-12-17T12:38:51,966 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/B in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:51,967 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/9cb43f670c5542e59b2d9ce8712c6892, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0e85790e318d49af9219b9699cf7177d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/10979d10a72f4cf5bb6fedb469812f86] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=35.4 K 2024-12-17T12:38:51,967 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cb43f670c5542e59b2d9ce8712c6892, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734439128970 2024-12-17T12:38:51,967 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a35f82678d344c6931b1473be1c80e8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734439128970 2024-12-17T12:38:51,968 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79bc3398f3384b6ca968c00e4800cd78, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1734439128986 2024-12-17T12:38:51,968 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e85790e318d49af9219b9699cf7177d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1734439128986 2024-12-17T12:38:51,968 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 10979d10a72f4cf5bb6fedb469812f86, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734439129602 2024-12-17T12:38:51,968 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1884618863f44588b6d2a6cfef9e2c26, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734439129602 2024-12-17T12:38:51,978 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#B#compaction#224 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:51,978 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/1c6641476d9d47e7ab7a2a705c4d7f25 is 50, key is test_row_0/B:col10/1734439129602/Put/seqid=0 2024-12-17T12:38:51,988 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#A#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:51,989 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/0871be2e44c94e1db2d46460e8c33ce9 is 50, key is test_row_0/A:col10/1734439129602/Put/seqid=0 2024-12-17T12:38:52,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742099_1275 (size=12359) 2024-12-17T12:38:52,016 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/1c6641476d9d47e7ab7a2a705c4d7f25 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/1c6641476d9d47e7ab7a2a705c4d7f25 2024-12-17T12:38:52,023 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/B of 49a464a6255681856b85f50432ce7984 into 1c6641476d9d47e7ab7a2a705c4d7f25(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:52,023 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:52,023 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/B, priority=13, startTime=1734439131965; duration=0sec 2024-12-17T12:38:52,023 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:52,023 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:B 2024-12-17T12:38:52,023 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:52,024 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:52,024 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/C is initiating minor compaction (all files) 2024-12-17T12:38:52,024 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/C in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:52,025 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/2f82e69a811a4211ab1ec06115470215, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/e3e15aaabfed45bc952d9d83bf4a12d6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/73417b57c2cf4fa1b95dfc4a7234e55b] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=35.4 K 2024-12-17T12:38:52,025 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f82e69a811a4211ab1ec06115470215, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734439128970 2024-12-17T12:38:52,025 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting e3e15aaabfed45bc952d9d83bf4a12d6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1734439128986 2024-12-17T12:38:52,025 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 73417b57c2cf4fa1b95dfc4a7234e55b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734439129602 2024-12-17T12:38:52,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is 
added to blk_1073742100_1276 (size=12359) 2024-12-17T12:38:52,037 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#C#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:52,038 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/228fa8efcecf4608b64d6397d95eb269 is 50, key is test_row_0/C:col10/1734439129602/Put/seqid=0 2024-12-17T12:38:52,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742101_1277 (size=12359) 2024-12-17T12:38:52,090 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/228fa8efcecf4608b64d6397d95eb269 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/228fa8efcecf4608b64d6397d95eb269 2024-12-17T12:38:52,095 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/C of 49a464a6255681856b85f50432ce7984 into 228fa8efcecf4608b64d6397d95eb269(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:52,095 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:52,095 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/C, priority=13, startTime=1734439131965; duration=0sec 2024-12-17T12:38:52,095 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:52,095 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:C 2024-12-17T12:38:52,095 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:52,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-17T12:38:52,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:52,096 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:38:52,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:52,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:52,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:52,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:52,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:52,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:52,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/1d68b43afbe24077be932102174f9a7c is 50, key is test_row_0/A:col10/1734439130733/Put/seqid=0 2024-12-17T12:38:52,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742102_1278 (size=12151) 2024-12-17T12:38:52,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-17T12:38:52,437 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/0871be2e44c94e1db2d46460e8c33ce9 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/0871be2e44c94e1db2d46460e8c33ce9 2024-12-17T12:38:52,441 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/A of 49a464a6255681856b85f50432ce7984 into 0871be2e44c94e1db2d46460e8c33ce9(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:52,441 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:52,441 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/A, priority=13, startTime=1734439131965; duration=0sec 2024-12-17T12:38:52,441 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:52,441 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:A 2024-12-17T12:38:52,509 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/1d68b43afbe24077be932102174f9a7c 2024-12-17T12:38:52,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/5b500d888cb94cada93cc8aa44bb3565 is 50, key is test_row_0/B:col10/1734439130733/Put/seqid=0 2024-12-17T12:38:52,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742103_1279 (size=12151) 2024-12-17T12:38:52,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:52,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:52,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:52,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439192856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:52,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:52,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439192857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:52,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:52,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439192857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:52,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:52,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439192858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:52,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:52,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439192858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:52,919 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/5b500d888cb94cada93cc8aa44bb3565 2024-12-17T12:38:52,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/cba9583a6af84ccfbc2f011dcebfaa1a is 50, key is test_row_0/C:col10/1734439130733/Put/seqid=0 2024-12-17T12:38:52,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742104_1280 (size=12151) 2024-12-17T12:38:52,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:52,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439192959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:52,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:52,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439192960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:52,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:52,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439192960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:52,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:52,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439192961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:52,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:52,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439192961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439193161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439193163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439193164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439193164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439193165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-17T12:38:53,330 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/cba9583a6af84ccfbc2f011dcebfaa1a 2024-12-17T12:38:53,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/1d68b43afbe24077be932102174f9a7c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1d68b43afbe24077be932102174f9a7c 2024-12-17T12:38:53,338 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1d68b43afbe24077be932102174f9a7c, entries=150, sequenceid=154, filesize=11.9 K 2024-12-17T12:38:53,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/5b500d888cb94cada93cc8aa44bb3565 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/5b500d888cb94cada93cc8aa44bb3565 2024-12-17T12:38:53,342 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/5b500d888cb94cada93cc8aa44bb3565, entries=150, sequenceid=154, filesize=11.9 K 2024-12-17T12:38:53,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/cba9583a6af84ccfbc2f011dcebfaa1a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/cba9583a6af84ccfbc2f011dcebfaa1a 2024-12-17T12:38:53,346 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/cba9583a6af84ccfbc2f011dcebfaa1a, entries=150, sequenceid=154, filesize=11.9 K 2024-12-17T12:38:53,347 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 49a464a6255681856b85f50432ce7984 in 1251ms, sequenceid=154, compaction requested=false 2024-12-17T12:38:53,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:53,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:53,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-12-17T12:38:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-12-17T12:38:53,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-17T12:38:53,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1680 sec 2024-12-17T12:38:53,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 2.1700 sec 2024-12-17T12:38:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:53,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-17T12:38:53,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:53,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:53,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:53,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:53,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:53,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:53,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/dbc568b1494b45f1973bbb60042580a8 is 50, key is test_row_0/A:col10/1734439132857/Put/seqid=0 2024-12-17T12:38:53,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742105_1281 (size=12151) 2024-12-17T12:38:53,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439193479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439193479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439193480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439193481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439193482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439193583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439193584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439193584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439193584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439193584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439193786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439193786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439193787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439193787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:53,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439193787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:53,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/dbc568b1494b45f1973bbb60042580a8 2024-12-17T12:38:53,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/682be2111e684da9a66be288581e025b is 50, key is test_row_0/B:col10/1734439132857/Put/seqid=0 2024-12-17T12:38:53,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742106_1282 (size=12151) 2024-12-17T12:38:54,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:54,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439194088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:54,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:54,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439194088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:54,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:54,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439194090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:54,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:54,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439194090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:54,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:54,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439194091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:54,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/682be2111e684da9a66be288581e025b 2024-12-17T12:38:54,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/821383d7f6344cc786d7d9531ac527fe is 50, key is test_row_0/C:col10/1734439132857/Put/seqid=0 2024-12-17T12:38:54,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742107_1283 (size=12151) 2024-12-17T12:38:54,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:54,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439194590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:54,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:54,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439194591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:54,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:54,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439194592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:54,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:54,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439194594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:54,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:54,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439194595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:54,694 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/821383d7f6344cc786d7d9531ac527fe 2024-12-17T12:38:54,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/dbc568b1494b45f1973bbb60042580a8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/dbc568b1494b45f1973bbb60042580a8 2024-12-17T12:38:54,703 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/dbc568b1494b45f1973bbb60042580a8, entries=150, sequenceid=169, filesize=11.9 K 2024-12-17T12:38:54,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/682be2111e684da9a66be288581e025b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/682be2111e684da9a66be288581e025b 2024-12-17T12:38:54,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/682be2111e684da9a66be288581e025b, entries=150, sequenceid=169, filesize=11.9 K 2024-12-17T12:38:54,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/821383d7f6344cc786d7d9531ac527fe as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/821383d7f6344cc786d7d9531ac527fe 2024-12-17T12:38:54,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/821383d7f6344cc786d7d9531ac527fe, entries=150, sequenceid=169, filesize=11.9 K 2024-12-17T12:38:54,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 49a464a6255681856b85f50432ce7984 in 1246ms, sequenceid=169, compaction requested=true 2024-12-17T12:38:54,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:54,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:54,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:54,712 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:54,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:54,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:54,712 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:54,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:54,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:54,712 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:54,712 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/B is initiating minor compaction (all files) 2024-12-17T12:38:54,713 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/B in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:54,713 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/1c6641476d9d47e7ab7a2a705c4d7f25, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/5b500d888cb94cada93cc8aa44bb3565, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/682be2111e684da9a66be288581e025b] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=35.8 K 2024-12-17T12:38:54,713 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:54,713 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/A is initiating minor compaction (all files) 2024-12-17T12:38:54,713 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c6641476d9d47e7ab7a2a705c4d7f25, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734439129602 2024-12-17T12:38:54,713 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/A in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:54,713 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/0871be2e44c94e1db2d46460e8c33ce9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1d68b43afbe24077be932102174f9a7c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/dbc568b1494b45f1973bbb60042580a8] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=35.8 K 2024-12-17T12:38:54,713 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b500d888cb94cada93cc8aa44bb3565, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734439130729 2024-12-17T12:38:54,713 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0871be2e44c94e1db2d46460e8c33ce9, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734439129602 2024-12-17T12:38:54,714 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 682be2111e684da9a66be288581e025b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734439132857 2024-12-17T12:38:54,714 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d68b43afbe24077be932102174f9a7c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734439130729 2024-12-17T12:38:54,714 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbc568b1494b45f1973bbb60042580a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734439132857 2024-12-17T12:38:54,722 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#A#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:54,722 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#B#compaction#233 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:54,722 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/f58bef4f935b4e38bdb0062f3b514412 is 50, key is test_row_0/A:col10/1734439132857/Put/seqid=0 2024-12-17T12:38:54,722 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/8088552e369a4c538c3823fadf232420 is 50, key is test_row_0/B:col10/1734439132857/Put/seqid=0 2024-12-17T12:38:54,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742109_1285 (size=12561) 2024-12-17T12:38:54,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742108_1284 (size=12561) 2024-12-17T12:38:54,763 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/f58bef4f935b4e38bdb0062f3b514412 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/f58bef4f935b4e38bdb0062f3b514412 2024-12-17T12:38:54,769 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/A of 49a464a6255681856b85f50432ce7984 into f58bef4f935b4e38bdb0062f3b514412(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:54,769 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:54,769 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/A, priority=13, startTime=1734439134711; duration=0sec 2024-12-17T12:38:54,769 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:54,769 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:A 2024-12-17T12:38:54,769 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:54,770 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:54,771 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/C is initiating minor compaction (all files) 2024-12-17T12:38:54,771 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/C in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:54,771 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/228fa8efcecf4608b64d6397d95eb269, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/cba9583a6af84ccfbc2f011dcebfaa1a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/821383d7f6344cc786d7d9531ac527fe] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=35.8 K 2024-12-17T12:38:54,771 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 228fa8efcecf4608b64d6397d95eb269, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734439129602 2024-12-17T12:38:54,771 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting cba9583a6af84ccfbc2f011dcebfaa1a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734439130729 2024-12-17T12:38:54,772 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 821383d7f6344cc786d7d9531ac527fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734439132857 2024-12-17T12:38:54,779 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#C#compaction#235 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:54,779 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/aefd822aeffa410bba047b8ec9546387 is 50, key is test_row_0/C:col10/1734439132857/Put/seqid=0 2024-12-17T12:38:54,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742110_1286 (size=12561) 2024-12-17T12:38:55,146 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/8088552e369a4c538c3823fadf232420 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8088552e369a4c538c3823fadf232420 2024-12-17T12:38:55,149 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/B of 49a464a6255681856b85f50432ce7984 into 8088552e369a4c538c3823fadf232420(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:55,149 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:55,149 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/B, priority=13, startTime=1734439134712; duration=0sec 2024-12-17T12:38:55,149 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:55,149 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:B 2024-12-17T12:38:55,188 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/aefd822aeffa410bba047b8ec9546387 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/aefd822aeffa410bba047b8ec9546387 2024-12-17T12:38:55,192 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/C of 49a464a6255681856b85f50432ce7984 into aefd822aeffa410bba047b8ec9546387(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:38:55,192 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:55,192 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/C, priority=13, startTime=1734439134712; duration=0sec 2024-12-17T12:38:55,192 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:55,192 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:C 2024-12-17T12:38:55,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-17T12:38:55,284 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-17T12:38:55,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:55,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-12-17T12:38:55,286 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:55,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-17T12:38:55,287 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:55,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:55,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-17T12:38:55,438 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-17T12:38:55,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:55,439 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-17T12:38:55,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:55,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:55,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:55,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:55,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:55,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:55,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/93f43bdabcf849e98e895738b76f3ac6 is 50, key is test_row_0/A:col10/1734439133481/Put/seqid=0 2024-12-17T12:38:55,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742111_1287 (size=12151) 2024-12-17T12:38:55,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-17T12:38:55,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:55,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439195626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439195626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439195628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439195629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439195629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439195730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439195730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439195731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439195732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439195733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,848 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/93f43bdabcf849e98e895738b76f3ac6 2024-12-17T12:38:55,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/8fc3f86438ac4bf5a34ef98548876232 is 50, key is test_row_0/B:col10/1734439133481/Put/seqid=0 2024-12-17T12:38:55,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742112_1288 (size=12151) 2024-12-17T12:38:55,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-17T12:38:55,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439195933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439195934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439195934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439195936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:55,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:55,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439195936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439196235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439196237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439196237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439196240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439196240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,258 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/8fc3f86438ac4bf5a34ef98548876232 2024-12-17T12:38:56,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/6dddb5c66b844d0db16c3faf5652a17f is 50, key is test_row_0/C:col10/1734439133481/Put/seqid=0 2024-12-17T12:38:56,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742113_1289 (size=12151) 2024-12-17T12:38:56,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-17T12:38:56,668 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/6dddb5c66b844d0db16c3faf5652a17f 2024-12-17T12:38:56,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/93f43bdabcf849e98e895738b76f3ac6 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/93f43bdabcf849e98e895738b76f3ac6 2024-12-17T12:38:56,676 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/93f43bdabcf849e98e895738b76f3ac6, entries=150, sequenceid=194, filesize=11.9 K 2024-12-17T12:38:56,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/8fc3f86438ac4bf5a34ef98548876232 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8fc3f86438ac4bf5a34ef98548876232 2024-12-17T12:38:56,680 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8fc3f86438ac4bf5a34ef98548876232, entries=150, sequenceid=194, filesize=11.9 K 2024-12-17T12:38:56,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/6dddb5c66b844d0db16c3faf5652a17f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/6dddb5c66b844d0db16c3faf5652a17f 2024-12-17T12:38:56,686 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/6dddb5c66b844d0db16c3faf5652a17f, entries=150, sequenceid=194, filesize=11.9 K 2024-12-17T12:38:56,687 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 49a464a6255681856b85f50432ce7984 in 1248ms, sequenceid=194, compaction requested=false 2024-12-17T12:38:56,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:56,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:38:56,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-12-17T12:38:56,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-12-17T12:38:56,689 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-17T12:38:56,689 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4010 sec 2024-12-17T12:38:56,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 1.4040 sec 2024-12-17T12:38:56,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:56,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-17T12:38:56,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:56,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:56,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:56,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:56,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:56,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:56,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/d971cf6ff2b34ed4a093ff5b5003584f is 50, key is test_row_0/A:col10/1734439135627/Put/seqid=0 2024-12-17T12:38:56,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439196754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439196755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439196755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439196755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439196756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742114_1290 (size=14541) 2024-12-17T12:38:56,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439196858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439196859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439196859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439196859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:56,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:56,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439196859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439197061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439197061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439197062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439197062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439197062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/d971cf6ff2b34ed4a093ff5b5003584f 2024-12-17T12:38:57,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/c57bd66151594431b4befe5155086ec5 is 50, key is test_row_0/B:col10/1734439135627/Put/seqid=0 2024-12-17T12:38:57,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742115_1291 (size=12151) 2024-12-17T12:38:57,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439197363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439197365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439197365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439197365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439197366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-17T12:38:57,390 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-17T12:38:57,391 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:57,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-12-17T12:38:57,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-17T12:38:57,392 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:57,392 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:57,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:57,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-17T12:38:57,544 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=70 2024-12-17T12:38:57,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:57,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:57,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:57,544 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:57,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:57,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:57,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/c57bd66151594431b4befe5155086ec5 2024-12-17T12:38:57,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/aa9df7d56f4647558473e7a7838cac79 is 50, key is test_row_0/C:col10/1734439135627/Put/seqid=0 2024-12-17T12:38:57,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742116_1292 (size=12151) 2024-12-17T12:38:57,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-17T12:38:57,696 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-17T12:38:57,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:57,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:57,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:57,697 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:38:57,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:57,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:57,848 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-17T12:38:57,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:57,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:57,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:57,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:57,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:57,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:57,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439197868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439197868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439197871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439197871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439197871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:57,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-17T12:38:58,001 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-17T12:38:58,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:58,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:58,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:58,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:58,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:58,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:58,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/aa9df7d56f4647558473e7a7838cac79 2024-12-17T12:38:58,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/d971cf6ff2b34ed4a093ff5b5003584f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/d971cf6ff2b34ed4a093ff5b5003584f 2024-12-17T12:38:58,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/d971cf6ff2b34ed4a093ff5b5003584f, entries=200, sequenceid=210, filesize=14.2 K 2024-12-17T12:38:58,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/c57bd66151594431b4befe5155086ec5 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/c57bd66151594431b4befe5155086ec5 2024-12-17T12:38:58,022 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/c57bd66151594431b4befe5155086ec5, entries=150, 
sequenceid=210, filesize=11.9 K 2024-12-17T12:38:58,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/aa9df7d56f4647558473e7a7838cac79 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/aa9df7d56f4647558473e7a7838cac79 2024-12-17T12:38:58,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/aa9df7d56f4647558473e7a7838cac79, entries=150, sequenceid=210, filesize=11.9 K 2024-12-17T12:38:58,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 49a464a6255681856b85f50432ce7984 in 1286ms, sequenceid=210, compaction requested=true 2024-12-17T12:38:58,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:58,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:38:58,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:58,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:38:58,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:58,028 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:58,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:38:58,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:58,028 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:58,029 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:58,029 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39253 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:58,029 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/A is initiating minor compaction (all files) 2024-12-17T12:38:58,029 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/B is initiating minor compaction (all files) 2024-12-17T12:38:58,029 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/A in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:58,029 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/B in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:58,029 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8088552e369a4c538c3823fadf232420, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8fc3f86438ac4bf5a34ef98548876232, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/c57bd66151594431b4befe5155086ec5] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=36.0 K 2024-12-17T12:38:58,029 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/f58bef4f935b4e38bdb0062f3b514412, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/93f43bdabcf849e98e895738b76f3ac6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/d971cf6ff2b34ed4a093ff5b5003584f] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=38.3 K 2024-12-17T12:38:58,029 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 8088552e369a4c538c3823fadf232420, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734439132857 2024-12-17T12:38:58,029 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting f58bef4f935b4e38bdb0062f3b514412, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734439132857 2024-12-17T12:38:58,030 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fc3f86438ac4bf5a34ef98548876232, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734439133478 2024-12-17T12:38:58,030 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93f43bdabcf849e98e895738b76f3ac6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734439133478 
2024-12-17T12:38:58,030 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting d971cf6ff2b34ed4a093ff5b5003584f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439135624 2024-12-17T12:38:58,030 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting c57bd66151594431b4befe5155086ec5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439135627 2024-12-17T12:38:58,036 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#B#compaction#242 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:58,036 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/bc42097744664d23bbe9965e01fa7bf4 is 50, key is test_row_0/B:col10/1734439135627/Put/seqid=0 2024-12-17T12:38:58,039 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#A#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:58,040 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/a130ff694f5b4125b1836a1f995d47e2 is 50, key is test_row_0/A:col10/1734439135627/Put/seqid=0 2024-12-17T12:38:58,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742117_1293 (size=12663) 2024-12-17T12:38:58,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742118_1294 (size=12663) 2024-12-17T12:38:58,057 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/a130ff694f5b4125b1836a1f995d47e2 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/a130ff694f5b4125b1836a1f995d47e2 2024-12-17T12:38:58,060 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/A of 49a464a6255681856b85f50432ce7984 into a130ff694f5b4125b1836a1f995d47e2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
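The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines above are driven by the store-file thresholds the compaction policy reads from configuration. A minimal configuration sketch with the standard HBase keys; the "16 blocking" figure matches what this log reports, while the other values are the usual defaults assumed here rather than read from this test:

// Sketch of the store-file thresholds behind "3 eligible, 16 blocking".
// Key names are standard HBase configuration keys; values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholds {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);      // store becomes eligible at 3 files
    conf.setInt("hbase.hstore.compaction.max", 10);     // cap on files per minor compaction (assumed default)
    conf.setInt("hbase.hstore.blockingStoreFiles", 16); // consistent with the "16 blocking" logged above
    return conf;
  }
}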
2024-12-17T12:38:58,060 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:58,060 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/A, priority=13, startTime=1734439138028; duration=0sec 2024-12-17T12:38:58,060 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:38:58,060 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:A 2024-12-17T12:38:58,061 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:38:58,061 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:38:58,061 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/C is initiating minor compaction (all files) 2024-12-17T12:38:58,061 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/C in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:58,062 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/aefd822aeffa410bba047b8ec9546387, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/6dddb5c66b844d0db16c3faf5652a17f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/aa9df7d56f4647558473e7a7838cac79] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=36.0 K 2024-12-17T12:38:58,062 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting aefd822aeffa410bba047b8ec9546387, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734439132857 2024-12-17T12:38:58,062 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6dddb5c66b844d0db16c3faf5652a17f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734439133478 2024-12-17T12:38:58,062 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa9df7d56f4647558473e7a7838cac79, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439135627 2024-12-17T12:38:58,068 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#C#compaction#244 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:38:58,069 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/60200b8fb5e34b4eb9ff41f28b7a2cfc is 50, key is test_row_0/C:col10/1734439135627/Put/seqid=0 2024-12-17T12:38:58,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742119_1295 (size=12663) 2024-12-17T12:38:58,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-17T12:38:58,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:58,154 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-17T12:38:58,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:58,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:58,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:58,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:58,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:58,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:58,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/2995483d7ea740b99ffdfad0f0d390e7 is 50, key is test_row_0/A:col10/1734439136750/Put/seqid=0 2024-12-17T12:38:58,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742120_1296 
(size=12151) 2024-12-17T12:38:58,453 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/bc42097744664d23bbe9965e01fa7bf4 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/bc42097744664d23bbe9965e01fa7bf4 2024-12-17T12:38:58,457 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/B of 49a464a6255681856b85f50432ce7984 into bc42097744664d23bbe9965e01fa7bf4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:38:58,457 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:58,457 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/B, priority=13, startTime=1734439138028; duration=0sec 2024-12-17T12:38:58,457 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:58,457 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:B 2024-12-17T12:38:58,478 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/60200b8fb5e34b4eb9ff41f28b7a2cfc as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/60200b8fb5e34b4eb9ff41f28b7a2cfc 2024-12-17T12:38:58,482 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/C of 49a464a6255681856b85f50432ce7984 into 60200b8fb5e34b4eb9ff41f28b7a2cfc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
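The RegionTooBusyException entries earlier and later in this log ("Over memstore limit=512.0 K") come from HRegion.checkResources blocking writes once the region's memstore exceeds the flush size multiplied by the block multiplier. A minimal configuration sketch of one combination that would yield a 512 KB blocking limit; the key names are standard, but the particular 128 KB x 4 split is an assumption, not read from this test's setup:

// Sketch of the memstore blocking limit behind "Over memstore limit=512.0 K".
// Blocking limit = flush size x block multiplier; the values below are assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // flush at 128 KB (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x = 512 KB
    return conf;
  }
}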
2024-12-17T12:38:58,482 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:58,482 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/C, priority=13, startTime=1734439138028; duration=0sec 2024-12-17T12:38:58,482 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:38:58,482 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:C 2024-12-17T12:38:58,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-17T12:38:58,567 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/2995483d7ea740b99ffdfad0f0d390e7 2024-12-17T12:38:58,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/0637a1377cba4fcf8df859dd346ef2ac is 50, key is test_row_0/B:col10/1734439136750/Put/seqid=0 2024-12-17T12:38:58,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742121_1297 (size=12151) 2024-12-17T12:38:58,577 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/0637a1377cba4fcf8df859dd346ef2ac 2024-12-17T12:38:58,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/d9f4f9c85240472496f49bf8644826ff is 50, key is test_row_0/C:col10/1734439136750/Put/seqid=0 2024-12-17T12:38:58,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742122_1298 (size=12151) 2024-12-17T12:38:58,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:58,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
as already flushing 2024-12-17T12:38:58,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:58,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439198884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:58,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439198885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:58,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439198885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:58,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439198886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:58,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439198886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,988 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/d9f4f9c85240472496f49bf8644826ff 2024-12-17T12:38:58,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:58,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439198988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:58,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439198988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:58,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439198988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:58,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439198988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:58,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439198990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:58,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/2995483d7ea740b99ffdfad0f0d390e7 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2995483d7ea740b99ffdfad0f0d390e7 2024-12-17T12:38:58,996 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2995483d7ea740b99ffdfad0f0d390e7, entries=150, sequenceid=232, filesize=11.9 K 2024-12-17T12:38:58,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/0637a1377cba4fcf8df859dd346ef2ac as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0637a1377cba4fcf8df859dd346ef2ac 2024-12-17T12:38:59,000 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0637a1377cba4fcf8df859dd346ef2ac, entries=150, sequenceid=232, filesize=11.9 K 2024-12-17T12:38:59,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/d9f4f9c85240472496f49bf8644826ff as 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/d9f4f9c85240472496f49bf8644826ff 2024-12-17T12:38:59,004 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/d9f4f9c85240472496f49bf8644826ff, entries=150, sequenceid=232, filesize=11.9 K 2024-12-17T12:38:59,005 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 49a464a6255681856b85f50432ce7984 in 851ms, sequenceid=232, compaction requested=false 2024-12-17T12:38:59,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:38:59,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:59,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-17T12:38:59,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-17T12:38:59,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-17T12:38:59,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6140 sec 2024-12-17T12:38:59,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.6160 sec 2024-12-17T12:38:59,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:38:59,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-17T12:38:59,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:38:59,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:59,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:38:59,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:59,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:38:59,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:38:59,198 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/efe6574f79f148e3a40934bf5d4ea1aa is 50, key is test_row_0/A:col10/1734439139192/Put/seqid=0 2024-12-17T12:38:59,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742123_1299 (size=12151) 2024-12-17T12:38:59,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439199206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439199207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439199208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439199208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439199208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439199309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439199310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439199311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439199311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439199311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-17T12:38:59,495 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-17T12:38:59,496 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:38:59,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-17T12:38:59,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-17T12:38:59,497 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:38:59,498 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:38:59,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:38:59,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439199511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439199512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439199513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439199513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439199513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-17T12:38:59,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/efe6574f79f148e3a40934bf5d4ea1aa 2024-12-17T12:38:59,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/705f06fca91b42f2ae43e5a3a7fb0c38 is 50, key is test_row_0/B:col10/1734439139192/Put/seqid=0 2024-12-17T12:38:59,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742124_1300 (size=12151) 2024-12-17T12:38:59,649 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-17T12:38:59,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:59,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
as already flushing 2024-12-17T12:38:59,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:59,650 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:59,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:59,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-17T12:38:59,801 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-17T12:38:59,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:59,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:59,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:59,802 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:59,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:59,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439199815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439199815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439199815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439199815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:38:59,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439199817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,954 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:38:59,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-17T12:38:59,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:59,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:38:59,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:38:59,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:59,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:38:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:00,016 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/705f06fca91b42f2ae43e5a3a7fb0c38 2024-12-17T12:39:00,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/0d4afa724a9a464fbc65c02c755ed8b5 is 50, key is test_row_0/C:col10/1734439139192/Put/seqid=0 2024-12-17T12:39:00,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742125_1301 (size=12151) 2024-12-17T12:39:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-17T12:39:00,106 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:00,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-17T12:39:00,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:00,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:39:00,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:00,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:00,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:00,258 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:00,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-17T12:39:00,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:00,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:39:00,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:00,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:00,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:00,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:00,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:00,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439200318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:00,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:00,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439200319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:00,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:00,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439200320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:00,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:00,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439200320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:00,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:00,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439200322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:00,411 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:00,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-17T12:39:00,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:00,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:39:00,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:00,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:00,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:00,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:00,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/0d4afa724a9a464fbc65c02c755ed8b5 2024-12-17T12:39:00,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/efe6574f79f148e3a40934bf5d4ea1aa as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/efe6574f79f148e3a40934bf5d4ea1aa 2024-12-17T12:39:00,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/efe6574f79f148e3a40934bf5d4ea1aa, entries=150, sequenceid=253, filesize=11.9 K 2024-12-17T12:39:00,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/705f06fca91b42f2ae43e5a3a7fb0c38 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/705f06fca91b42f2ae43e5a3a7fb0c38 2024-12-17T12:39:00,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/705f06fca91b42f2ae43e5a3a7fb0c38, entries=150, sequenceid=253, filesize=11.9 K 2024-12-17T12:39:00,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/0d4afa724a9a464fbc65c02c755ed8b5 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/0d4afa724a9a464fbc65c02c755ed8b5 2024-12-17T12:39:00,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/0d4afa724a9a464fbc65c02c755ed8b5, entries=150, sequenceid=253, filesize=11.9 K 2024-12-17T12:39:00,451 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 49a464a6255681856b85f50432ce7984 in 1259ms, sequenceid=253, compaction requested=true 2024-12-17T12:39:00,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:00,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:39:00,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:00,451 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:00,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:39:00,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:00,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:39:00,451 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:00,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:00,452 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:00,452 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:00,452 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/A is initiating minor compaction (all files) 2024-12-17T12:39:00,452 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/B is initiating minor compaction (all files) 2024-12-17T12:39:00,452 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/B in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:39:00,452 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/bc42097744664d23bbe9965e01fa7bf4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0637a1377cba4fcf8df859dd346ef2ac, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/705f06fca91b42f2ae43e5a3a7fb0c38] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=36.1 K 2024-12-17T12:39:00,452 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/A in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:00,452 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/a130ff694f5b4125b1836a1f995d47e2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2995483d7ea740b99ffdfad0f0d390e7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/efe6574f79f148e3a40934bf5d4ea1aa] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=36.1 K 2024-12-17T12:39:00,453 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting bc42097744664d23bbe9965e01fa7bf4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439135627 2024-12-17T12:39:00,453 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting a130ff694f5b4125b1836a1f995d47e2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439135627 2024-12-17T12:39:00,453 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 0637a1377cba4fcf8df859dd346ef2ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1734439136750 2024-12-17T12:39:00,453 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2995483d7ea740b99ffdfad0f0d390e7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1734439136750 2024-12-17T12:39:00,453 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting efe6574f79f148e3a40934bf5d4ea1aa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439138880 2024-12-17T12:39:00,453 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 705f06fca91b42f2ae43e5a3a7fb0c38, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439138880 
2024-12-17T12:39:00,459 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#A#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:00,459 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#B#compaction#251 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:00,460 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/2ffad6c9405b40f2b7d1e680e1613f09 is 50, key is test_row_0/A:col10/1734439139192/Put/seqid=0 2024-12-17T12:39:00,460 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/144b9877ed70448283882e7d3adccb7b is 50, key is test_row_0/B:col10/1734439139192/Put/seqid=0 2024-12-17T12:39:00,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742127_1303 (size=12765) 2024-12-17T12:39:00,469 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/144b9877ed70448283882e7d3adccb7b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/144b9877ed70448283882e7d3adccb7b 2024-12-17T12:39:00,474 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/B of 49a464a6255681856b85f50432ce7984 into 144b9877ed70448283882e7d3adccb7b(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:00,474 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:00,474 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/B, priority=13, startTime=1734439140451; duration=0sec 2024-12-17T12:39:00,474 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:00,474 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:B 2024-12-17T12:39:00,474 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:00,476 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:00,476 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/C is initiating minor compaction (all files) 2024-12-17T12:39:00,476 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/C in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:00,476 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/60200b8fb5e34b4eb9ff41f28b7a2cfc, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/d9f4f9c85240472496f49bf8644826ff, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/0d4afa724a9a464fbc65c02c755ed8b5] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=36.1 K 2024-12-17T12:39:00,476 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 60200b8fb5e34b4eb9ff41f28b7a2cfc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439135627 2024-12-17T12:39:00,476 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d9f4f9c85240472496f49bf8644826ff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1734439136750 2024-12-17T12:39:00,477 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d4afa724a9a464fbc65c02c755ed8b5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439138880 2024-12-17T12:39:00,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 
is added to blk_1073742126_1302 (size=12765) 2024-12-17T12:39:00,484 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#C#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:00,484 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/43eb1de6ef7142afaca7b930135d9ec7 is 50, key is test_row_0/C:col10/1734439139192/Put/seqid=0 2024-12-17T12:39:00,484 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/2ffad6c9405b40f2b7d1e680e1613f09 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2ffad6c9405b40f2b7d1e680e1613f09 2024-12-17T12:39:00,492 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/A of 49a464a6255681856b85f50432ce7984 into 2ffad6c9405b40f2b7d1e680e1613f09(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:00,492 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:00,492 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/A, priority=13, startTime=1734439140451; duration=0sec 2024-12-17T12:39:00,492 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:00,492 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:A 2024-12-17T12:39:00,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742128_1304 (size=12765) 2024-12-17T12:39:00,517 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/43eb1de6ef7142afaca7b930135d9ec7 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/43eb1de6ef7142afaca7b930135d9ec7 2024-12-17T12:39:00,523 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/C of 49a464a6255681856b85f50432ce7984 into 43eb1de6ef7142afaca7b930135d9ec7(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:00,523 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:00,523 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/C, priority=13, startTime=1734439140451; duration=0sec 2024-12-17T12:39:00,523 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:00,523 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:C 2024-12-17T12:39:00,563 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:00,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-17T12:39:00,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:00,564 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-17T12:39:00,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:39:00,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:00,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:39:00,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:00,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:39:00,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:00,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/eadcd42f6186440fa166f269e95ec47b is 50, key is test_row_0/A:col10/1734439139205/Put/seqid=0 2024-12-17T12:39:00,572 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742129_1305 (size=12301) 2024-12-17T12:39:00,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-17T12:39:00,973 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/eadcd42f6186440fa166f269e95ec47b 2024-12-17T12:39:00,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/60416615522a4f9b8f360b7f4396b5ca is 50, key is test_row_0/B:col10/1734439139205/Put/seqid=0 2024-12-17T12:39:00,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742130_1306 (size=12301) 2024-12-17T12:39:01,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:39:01,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:39:01,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439201333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439201334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439201335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439201335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439201336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,384 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/60416615522a4f9b8f360b7f4396b5ca 2024-12-17T12:39:01,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/2d7aedec2ec849aca84d511d66d8c0ed is 50, key is test_row_0/C:col10/1734439139205/Put/seqid=0 2024-12-17T12:39:01,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742131_1307 (size=12301) 2024-12-17T12:39:01,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439201437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439201437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439201438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439201438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439201439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-17T12:39:01,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439201639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439201641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439201641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439201641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439201641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,797 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/2d7aedec2ec849aca84d511d66d8c0ed 2024-12-17T12:39:01,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/eadcd42f6186440fa166f269e95ec47b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/eadcd42f6186440fa166f269e95ec47b 2024-12-17T12:39:01,804 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/eadcd42f6186440fa166f269e95ec47b, entries=150, sequenceid=274, filesize=12.0 K 2024-12-17T12:39:01,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/60416615522a4f9b8f360b7f4396b5ca as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/60416615522a4f9b8f360b7f4396b5ca 2024-12-17T12:39:01,818 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/60416615522a4f9b8f360b7f4396b5ca, entries=150, sequenceid=274, filesize=12.0 K 2024-12-17T12:39:01,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 
{event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/2d7aedec2ec849aca84d511d66d8c0ed as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/2d7aedec2ec849aca84d511d66d8c0ed 2024-12-17T12:39:01,823 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/2d7aedec2ec849aca84d511d66d8c0ed, entries=150, sequenceid=274, filesize=12.0 K 2024-12-17T12:39:01,824 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 49a464a6255681856b85f50432ce7984 in 1261ms, sequenceid=274, compaction requested=false 2024-12-17T12:39:01,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:01,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:01,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-17T12:39:01,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-17T12:39:01,825 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-17T12:39:01,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3270 sec 2024-12-17T12:39:01,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 2.3300 sec 2024-12-17T12:39:01,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:39:01,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-17T12:39:01,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:39:01,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:01,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:39:01,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:01,944 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:39:01,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:01,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/89f52191ab7e4c47983786d007da038d is 50, key is test_row_0/A:col10/1734439141334/Put/seqid=0 2024-12-17T12:39:01,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742132_1308 (size=14741) 2024-12-17T12:39:01,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439201951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439201951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439201953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439201953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:01,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:01,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439201954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439202055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439202055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439202056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439202057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439202057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439202256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439202256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439202258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439202259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439202259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/89f52191ab7e4c47983786d007da038d 2024-12-17T12:39:02,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/519c5b946163419bb9a18d2ab8245369 is 50, key is test_row_0/B:col10/1734439141334/Put/seqid=0 2024-12-17T12:39:02,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742133_1309 (size=12301) 2024-12-17T12:39:02,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439202559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439202560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439202561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439202562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:02,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439202563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:02,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/519c5b946163419bb9a18d2ab8245369 2024-12-17T12:39:02,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/a3432777231a4017a41eb1ce01ef1210 is 50, key is test_row_0/C:col10/1734439141334/Put/seqid=0 2024-12-17T12:39:02,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742134_1310 (size=12301) 2024-12-17T12:39:03,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:03,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439203063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:03,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:03,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439203064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:03,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:03,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439203067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:03,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:03,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439203067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:03,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:03,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439203067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:03,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/a3432777231a4017a41eb1ce01ef1210 2024-12-17T12:39:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/89f52191ab7e4c47983786d007da038d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/89f52191ab7e4c47983786d007da038d 2024-12-17T12:39:03,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/89f52191ab7e4c47983786d007da038d, entries=200, sequenceid=294, filesize=14.4 K 2024-12-17T12:39:03,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/519c5b946163419bb9a18d2ab8245369 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/519c5b946163419bb9a18d2ab8245369 2024-12-17T12:39:03,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/519c5b946163419bb9a18d2ab8245369, entries=150, sequenceid=294, filesize=12.0 K 2024-12-17T12:39:03,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/a3432777231a4017a41eb1ce01ef1210 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a3432777231a4017a41eb1ce01ef1210 2024-12-17T12:39:03,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a3432777231a4017a41eb1ce01ef1210, entries=150, sequenceid=294, filesize=12.0 K 2024-12-17T12:39:03,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 49a464a6255681856b85f50432ce7984 in 1244ms, sequenceid=294, compaction requested=true 2024-12-17T12:39:03,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:03,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:39:03,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:03,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:39:03,188 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:03,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:03,188 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:03,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:39:03,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:03,189 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:03,189 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39807 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:03,189 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/A is initiating minor compaction (all files) 2024-12-17T12:39:03,189 DEBUG 
[RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/B is initiating minor compaction (all files) 2024-12-17T12:39:03,189 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/B in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:03,189 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/A in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:03,189 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/144b9877ed70448283882e7d3adccb7b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/60416615522a4f9b8f360b7f4396b5ca, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/519c5b946163419bb9a18d2ab8245369] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=36.5 K 2024-12-17T12:39:03,189 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2ffad6c9405b40f2b7d1e680e1613f09, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/eadcd42f6186440fa166f269e95ec47b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/89f52191ab7e4c47983786d007da038d] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=38.9 K 2024-12-17T12:39:03,189 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ffad6c9405b40f2b7d1e680e1613f09, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439138880 2024-12-17T12:39:03,189 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 144b9877ed70448283882e7d3adccb7b, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439138880 2024-12-17T12:39:03,189 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting eadcd42f6186440fa166f269e95ec47b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734439139197 2024-12-17T12:39:03,190 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89f52191ab7e4c47983786d007da038d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439141332 2024-12-17T12:39:03,190 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 
60416615522a4f9b8f360b7f4396b5ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734439139197 2024-12-17T12:39:03,190 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 519c5b946163419bb9a18d2ab8245369, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439141332 2024-12-17T12:39:03,196 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#A#compaction#260 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:03,196 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/1095a068fb834973a0f234670feae581 is 50, key is test_row_0/A:col10/1734439141334/Put/seqid=0 2024-12-17T12:39:03,205 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#B#compaction#261 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:03,206 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/28304a4e5bd8450e9af24d45db360651 is 50, key is test_row_0/B:col10/1734439141334/Put/seqid=0 2024-12-17T12:39:03,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742135_1311 (size=13017) 2024-12-17T12:39:03,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742136_1312 (size=13017) 2024-12-17T12:39:03,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-17T12:39:03,601 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-17T12:39:03,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:03,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-17T12:39:03,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-17T12:39:03,604 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:03,604 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:03,604 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:03,613 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/1095a068fb834973a0f234670feae581 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1095a068fb834973a0f234670feae581 2024-12-17T12:39:03,617 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/A of 49a464a6255681856b85f50432ce7984 into 1095a068fb834973a0f234670feae581(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:03,617 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:03,617 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/A, priority=13, startTime=1734439143188; duration=0sec 2024-12-17T12:39:03,617 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:03,617 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:A 2024-12-17T12:39:03,617 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:03,618 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:03,618 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/C is initiating minor compaction (all files) 2024-12-17T12:39:03,618 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/C in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:39:03,618 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/43eb1de6ef7142afaca7b930135d9ec7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/2d7aedec2ec849aca84d511d66d8c0ed, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a3432777231a4017a41eb1ce01ef1210] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=36.5 K 2024-12-17T12:39:03,618 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43eb1de6ef7142afaca7b930135d9ec7, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439138880 2024-12-17T12:39:03,619 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d7aedec2ec849aca84d511d66d8c0ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734439139197 2024-12-17T12:39:03,619 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3432777231a4017a41eb1ce01ef1210, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439141332 2024-12-17T12:39:03,625 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/28304a4e5bd8450e9af24d45db360651 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/28304a4e5bd8450e9af24d45db360651 2024-12-17T12:39:03,628 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#C#compaction#262 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:03,628 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/a079255bc0cf4a2485a88629d958a8db is 50, key is test_row_0/C:col10/1734439141334/Put/seqid=0 2024-12-17T12:39:03,629 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/B of 49a464a6255681856b85f50432ce7984 into 28304a4e5bd8450e9af24d45db360651(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:03,629 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:03,629 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/B, priority=13, startTime=1734439143188; duration=0sec 2024-12-17T12:39:03,629 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:03,629 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:B 2024-12-17T12:39:03,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742137_1313 (size=13017) 2024-12-17T12:39:03,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-17T12:39:03,756 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:03,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-17T12:39:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:39:03,757 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-17T12:39:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:39:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:39:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:39:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:03,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/fd7e0aaad09b4105a02cb1b76c60562b is 50, key is test_row_0/A:col10/1734439141952/Put/seqid=0 2024-12-17T12:39:03,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742138_1314 (size=12301) 2024-12-17T12:39:03,765 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/fd7e0aaad09b4105a02cb1b76c60562b 2024-12-17T12:39:03,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/ff3d959083764213aba22ef0b9f25928 is 50, key is test_row_0/B:col10/1734439141952/Put/seqid=0 2024-12-17T12:39:03,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742139_1315 (size=12301) 2024-12-17T12:39:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-17T12:39:04,036 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/a079255bc0cf4a2485a88629d958a8db as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a079255bc0cf4a2485a88629d958a8db 2024-12-17T12:39:04,039 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/C of 49a464a6255681856b85f50432ce7984 into a079255bc0cf4a2485a88629d958a8db(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:04,039 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:04,039 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/C, priority=13, startTime=1734439143188; duration=0sec 2024-12-17T12:39:04,040 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:04,040 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:C 2024-12-17T12:39:04,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:39:04,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:39:04,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439204078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439204078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439204079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439204079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439204080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,177 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/ff3d959083764213aba22ef0b9f25928 2024-12-17T12:39:04,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439204181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439204182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439204182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439204182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439204182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/f273fa6708cc42f4b9c0abc2a3f33c5e is 50, key is test_row_0/C:col10/1734439141952/Put/seqid=0 2024-12-17T12:39:04,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742140_1316 (size=12301) 2024-12-17T12:39:04,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-17T12:39:04,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439204384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439204384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439204384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439204385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439204385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,588 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/f273fa6708cc42f4b9c0abc2a3f33c5e 2024-12-17T12:39:04,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/fd7e0aaad09b4105a02cb1b76c60562b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/fd7e0aaad09b4105a02cb1b76c60562b 2024-12-17T12:39:04,595 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/fd7e0aaad09b4105a02cb1b76c60562b, entries=150, sequenceid=312, filesize=12.0 K 2024-12-17T12:39:04,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/ff3d959083764213aba22ef0b9f25928 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/ff3d959083764213aba22ef0b9f25928 2024-12-17T12:39:04,599 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/ff3d959083764213aba22ef0b9f25928, entries=150, sequenceid=312, filesize=12.0 K 2024-12-17T12:39:04,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/f273fa6708cc42f4b9c0abc2a3f33c5e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/f273fa6708cc42f4b9c0abc2a3f33c5e 2024-12-17T12:39:04,603 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/f273fa6708cc42f4b9c0abc2a3f33c5e, entries=150, sequenceid=312, filesize=12.0 K 2024-12-17T12:39:04,604 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 49a464a6255681856b85f50432ce7984 in 847ms, sequenceid=312, compaction requested=false 2024-12-17T12:39:04,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:04,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
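While the flush for pid=74 is writing out stores A, B and C, incoming Mutate calls are rejected with RegionTooBusyException because the region's memstore is over its blocking limit (512.0 K in this run; the blocking limit is normally the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier). The stock HBase client ordinarily retries this exception internally with backoff; the sketch below makes that retry explicit purely for illustration and assumes the exception actually reaches application code (for example with client retries turned down). All names are illustrative.

import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Illustrative retry-with-backoff around a single put; the real HBase client
// performs an equivalent retry itself when a region reports it is too busy.
public class BusyRegionRetrySketch {
  static void putWithRetry(Connection connection, Put put)
      throws IOException, InterruptedException {
    try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put); // rejected with RegionTooBusyException while the memstore is over its limit
          return;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e; // give up after a few attempts
          }
          Thread.sleep(backoffMs); // give the in-flight flush time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}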
2024-12-17T12:39:04,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-17T12:39:04,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-17T12:39:04,606 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-17T12:39:04,606 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0010 sec 2024-12-17T12:39:04,607 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.0040 sec 2024-12-17T12:39:04,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:39:04,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-17T12:39:04,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:39:04,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:04,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:39:04,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:04,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:39:04,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:04,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/aecd066447ad479d9c3e35a8034a2e81 is 50, key is test_row_0/A:col10/1734439144687/Put/seqid=0 2024-12-17T12:39:04,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742141_1317 (size=17181) 2024-12-17T12:39:04,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439204694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439204695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439204696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439204696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439204697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-17T12:39:04,706 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-17T12:39:04,707 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:04,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-17T12:39:04,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-17T12:39:04,708 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:04,709 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:04,709 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:04,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439204798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439204799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439204799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:04,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439204799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:04,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:39:04,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439204800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:04,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75
2024-12-17T12:39:04,860 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:04,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-12-17T12:39:04,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:04,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing
2024-12-17T12:39:04,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:04,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:04,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:04,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=76
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:39:05,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:39:05,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439205001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439205001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:39:05,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439205002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:39:05,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439205003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:39:05,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439205003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75
2024-12-17T12:39:05,013 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,013 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-12-17T12:39:05,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:05,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing
2024-12-17T12:39:05,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:05,013 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=76
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/aecd066447ad479d9c3e35a8034a2e81
2024-12-17T12:39:05,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/93846ba77197410aaa08e1c8cd7ffe72 is 50, key is test_row_0/B:col10/1734439144687/Put/seqid=0
2024-12-17T12:39:05,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742142_1318 (size=12301)
2024-12-17T12:39:05,164 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-12-17T12:39:05,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:05,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing
2024-12-17T12:39:05,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:05,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=76
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:39:05,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439205305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:39:05,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439205305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:39:05,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439205306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:39:05,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439205306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:39:05,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439205307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75
2024-12-17T12:39:05,316 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-12-17T12:39:05,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:05,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing
2024-12-17T12:39:05,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:05,317 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=76
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,468 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-12-17T12:39:05,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:05,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing
2024-12-17T12:39:05,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:05,469 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=76
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/93846ba77197410aaa08e1c8cd7ffe72
2024-12-17T12:39:05,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/c490327075ed490e90096613d181e375 is 50, key is test_row_0/C:col10/1734439144687/Put/seqid=0
2024-12-17T12:39:05,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742143_1319 (size=12301)
2024-12-17T12:39:05,621 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:05,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-12-17T12:39:05,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:05,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing
2024-12-17T12:39:05,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.
2024-12-17T12:39:05,621 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-17T12:39:05,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=76
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:05,773 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:05,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-17T12:39:05,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:05,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:39:05,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:05,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:05,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:05,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:05,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48314 deadline: 1734439205808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:05,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48328 deadline: 1734439205808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:05,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48362 deadline: 1734439205808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:05,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-17T12:39:05,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48380 deadline: 1734439205811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:05,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48354 deadline: 1734439205812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:05,916 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/c490327075ed490e90096613d181e375 2024-12-17T12:39:05,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/aecd066447ad479d9c3e35a8034a2e81 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/aecd066447ad479d9c3e35a8034a2e81 2024-12-17T12:39:05,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/aecd066447ad479d9c3e35a8034a2e81, entries=250, sequenceid=335, filesize=16.8 K 2024-12-17T12:39:05,925 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:05,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/93846ba77197410aaa08e1c8cd7ffe72 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/93846ba77197410aaa08e1c8cd7ffe72 2024-12-17T12:39:05,926 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-17T12:39:05,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:05,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:39:05,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:05,926 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:05,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:05,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
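Annotation: the RegionTooBusyException records above show writers being rejected because the region's memstore is over its 512 KB blocking limit while the flush is still in flight. A minimal client-side sketch of the same write path is below; the table name TestAcidGuarantees, family "A", qualifier "col10" and row key test_row_0 are taken from the log, while the value, retry count and backoff are hypothetical. In practice the HBase client's built-in retry usually absorbs this exception before the caller sees it, so the explicit catch here only makes the failure mode visible.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));               // row key seen in the log
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                                          // hypothetical starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                                            // may surface RegionTooBusyException
          break;
        } catch (RegionTooBusyException busy) {
          // Memstore is over its blocking limit; back off and let the flush catch up.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```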
2024-12-17T12:39:05,930 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/93846ba77197410aaa08e1c8cd7ffe72, entries=150, sequenceid=335, filesize=12.0 K 2024-12-17T12:39:05,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/c490327075ed490e90096613d181e375 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/c490327075ed490e90096613d181e375 2024-12-17T12:39:05,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/c490327075ed490e90096613d181e375, entries=150, sequenceid=335, filesize=12.0 K 2024-12-17T12:39:05,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 49a464a6255681856b85f50432ce7984 in 1247ms, sequenceid=335, compaction requested=true 2024-12-17T12:39:05,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:05,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:39:05,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:05,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:39:05,935 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:05,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:05,935 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:05,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49a464a6255681856b85f50432ce7984:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:39:05,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:05,936 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42499 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:05,936 DEBUG 
[RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:05,936 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/B is initiating minor compaction (all files) 2024-12-17T12:39:05,936 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/A is initiating minor compaction (all files) 2024-12-17T12:39:05,936 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/A in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:05,937 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/B in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:05,937 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/28304a4e5bd8450e9af24d45db360651, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/ff3d959083764213aba22ef0b9f25928, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/93846ba77197410aaa08e1c8cd7ffe72] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=36.7 K 2024-12-17T12:39:05,937 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1095a068fb834973a0f234670feae581, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/fd7e0aaad09b4105a02cb1b76c60562b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/aecd066447ad479d9c3e35a8034a2e81] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=41.5 K 2024-12-17T12:39:05,937 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1095a068fb834973a0f234670feae581, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439141332 2024-12-17T12:39:05,937 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 28304a4e5bd8450e9af24d45db360651, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439141332 2024-12-17T12:39:05,937 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd7e0aaad09b4105a02cb1b76c60562b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=312, earliestPutTs=1734439141949 2024-12-17T12:39:05,937 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting ff3d959083764213aba22ef0b9f25928, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1734439141949 2024-12-17T12:39:05,937 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting aecd066447ad479d9c3e35a8034a2e81, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1734439144078 2024-12-17T12:39:05,937 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 93846ba77197410aaa08e1c8cd7ffe72, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1734439144078 2024-12-17T12:39:05,943 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#B#compaction#269 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:05,944 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/034d931ab304483f8236608372e3034b is 50, key is test_row_0/B:col10/1734439144687/Put/seqid=0 2024-12-17T12:39:05,946 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#A#compaction#270 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:05,947 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/4073c95da9eb43c5aa231ebd07563947 is 50, key is test_row_0/A:col10/1734439144687/Put/seqid=0 2024-12-17T12:39:05,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742144_1320 (size=13119) 2024-12-17T12:39:05,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742145_1321 (size=13119) 2024-12-17T12:39:06,078 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:06,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-17T12:39:06,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:39:06,079 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-17T12:39:06,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:39:06,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:06,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:39:06,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:06,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:39:06,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:06,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/17b7526aa87b4f53a1af1e1eda33f764 is 50, key is test_row_0/A:col10/1734439144696/Put/seqid=0 2024-12-17T12:39:06,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742146_1322 (size=12301) 2024-12-17T12:39:06,355 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/034d931ab304483f8236608372e3034b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/034d931ab304483f8236608372e3034b 2024-12-17T12:39:06,359 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/B of 49a464a6255681856b85f50432ce7984 into 034d931ab304483f8236608372e3034b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:06,359 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:06,359 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/B, priority=13, startTime=1734439145935; duration=0sec 2024-12-17T12:39:06,359 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:06,359 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:B 2024-12-17T12:39:06,359 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:06,360 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:06,360 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 49a464a6255681856b85f50432ce7984/C is initiating minor compaction (all files) 2024-12-17T12:39:06,361 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 49a464a6255681856b85f50432ce7984/C in TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:06,361 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a079255bc0cf4a2485a88629d958a8db, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/f273fa6708cc42f4b9c0abc2a3f33c5e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/c490327075ed490e90096613d181e375] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp, totalSize=36.7 K 2024-12-17T12:39:06,361 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting a079255bc0cf4a2485a88629d958a8db, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439141332 2024-12-17T12:39:06,361 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting f273fa6708cc42f4b9c0abc2a3f33c5e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1734439141949 2024-12-17T12:39:06,361 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting c490327075ed490e90096613d181e375, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1734439144078 2024-12-17T12:39:06,367 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/4073c95da9eb43c5aa231ebd07563947 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/4073c95da9eb43c5aa231ebd07563947 2024-12-17T12:39:06,367 DEBUG [Thread-1154 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x739f6ad6 to 127.0.0.1:59557 2024-12-17T12:39:06,367 DEBUG [Thread-1154 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:06,368 DEBUG [Thread-1156 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5caaf139 to 127.0.0.1:59557 2024-12-17T12:39:06,368 DEBUG [Thread-1156 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:06,368 DEBUG [Thread-1150 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05118689 to 127.0.0.1:59557 2024-12-17T12:39:06,369 DEBUG [Thread-1150 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:06,369 DEBUG [Thread-1158 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04506927 to 127.0.0.1:59557 2024-12-17T12:39:06,369 DEBUG [Thread-1158 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:06,371 DEBUG [Thread-1152 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d6434c1 to 127.0.0.1:59557 2024-12-17T12:39:06,371 DEBUG [Thread-1152 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:06,372 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/A of 49a464a6255681856b85f50432ce7984 into 4073c95da9eb43c5aa231ebd07563947(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:06,373 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:06,373 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/A, priority=13, startTime=1734439145935; duration=0sec 2024-12-17T12:39:06,373 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:06,373 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:A 2024-12-17T12:39:06,377 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49a464a6255681856b85f50432ce7984#C#compaction#272 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:06,378 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/b71a317a38e14161a23b9fb778453324 is 50, key is test_row_0/C:col10/1734439144687/Put/seqid=0 2024-12-17T12:39:06,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742147_1323 (size=13119) 2024-12-17T12:39:06,487 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/17b7526aa87b4f53a1af1e1eda33f764 2024-12-17T12:39:06,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/1dddf20d176643f688122ced0b797148 is 50, key is test_row_0/B:col10/1734439144696/Put/seqid=0 2024-12-17T12:39:06,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742148_1324 (size=12301) 2024-12-17T12:39:06,795 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/b71a317a38e14161a23b9fb778453324 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/b71a317a38e14161a23b9fb778453324 2024-12-17T12:39:06,802 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 49a464a6255681856b85f50432ce7984/C of 49a464a6255681856b85f50432ce7984 into b71a317a38e14161a23b9fb778453324(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:06,803 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:06,803 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984., storeName=49a464a6255681856b85f50432ce7984/C, priority=13, startTime=1734439145935; duration=0sec 2024-12-17T12:39:06,803 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:06,803 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49a464a6255681856b85f50432ce7984:C 2024-12-17T12:39:06,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-17T12:39:06,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 49a464a6255681856b85f50432ce7984 2024-12-17T12:39:06,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. as already flushing 2024-12-17T12:39:06,817 DEBUG [Thread-1141 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d89d666 to 127.0.0.1:59557 2024-12-17T12:39:06,817 DEBUG [Thread-1141 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:06,817 DEBUG [Thread-1143 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x44769f38 to 127.0.0.1:59557 2024-12-17T12:39:06,817 DEBUG [Thread-1143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:06,819 DEBUG [Thread-1145 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cf29c07 to 127.0.0.1:59557 2024-12-17T12:39:06,819 DEBUG [Thread-1145 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:06,819 DEBUG [Thread-1139 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x528c21b0 to 127.0.0.1:59557 2024-12-17T12:39:06,819 DEBUG [Thread-1139 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:06,821 DEBUG [Thread-1147 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0cfb5a18 to 127.0.0.1:59557 2024-12-17T12:39:06,821 DEBUG [Thread-1147 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:06,885 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-17T12:39:06,904 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/1dddf20d176643f688122ced0b797148 2024-12-17T12:39:06,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/e8901a24558e4e25a1b3b35d3656f828 is 50, key is test_row_0/C:col10/1734439144696/Put/seqid=0 2024-12-17T12:39:06,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742149_1325 (size=12301) 2024-12-17T12:39:07,323 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/e8901a24558e4e25a1b3b35d3656f828 2024-12-17T12:39:07,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/17b7526aa87b4f53a1af1e1eda33f764 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/17b7526aa87b4f53a1af1e1eda33f764 2024-12-17T12:39:07,342 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/17b7526aa87b4f53a1af1e1eda33f764, entries=150, sequenceid=349, filesize=12.0 K 2024-12-17T12:39:07,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/1dddf20d176643f688122ced0b797148 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/1dddf20d176643f688122ced0b797148 2024-12-17T12:39:07,347 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/1dddf20d176643f688122ced0b797148, entries=150, sequenceid=349, filesize=12.0 K 2024-12-17T12:39:07,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/e8901a24558e4e25a1b3b35d3656f828 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/e8901a24558e4e25a1b3b35d3656f828 2024-12-17T12:39:07,351 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/e8901a24558e4e25a1b3b35d3656f828, entries=150, sequenceid=349, filesize=12.0 K 2024-12-17T12:39:07,352 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=33.54 KB/34350 for 49a464a6255681856b85f50432ce7984 in 1274ms, sequenceid=349, compaction requested=false 2024-12-17T12:39:07,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:07,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:07,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-17T12:39:07,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-17T12:39:07,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-17T12:39:07,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6440 sec 2024-12-17T12:39:07,355 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 2.6470 sec 2024-12-17T12:39:08,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-17T12:39:08,814 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-17T12:39:08,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-17T12:39:08,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-12-17T12:39:08,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-12-17T12:39:08,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-12-17T12:39:08,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-12-17T12:39:08,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55 2024-12-17T12:39:08,814 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-17T12:39:08,815 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8643 2024-12-17T12:39:08,815 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8329 2024-12-17T12:39:08,815 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8229 2024-12-17T12:39:08,815 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8566 2024-12-17T12:39:08,815 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8312 2024-12-17T12:39:08,815 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-17T12:39:08,815 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-17T12:39:08,815 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75565da1 to 127.0.0.1:59557 2024-12-17T12:39:08,815 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:08,816 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-17T12:39:08,817 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-17T12:39:08,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:08,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-17T12:39:08,822 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439148821"}]},"ts":"1734439148821"} 2024-12-17T12:39:08,823 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-17T12:39:08,866 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-17T12:39:08,867 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-17T12:39:08,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=78, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=49a464a6255681856b85f50432ce7984, UNASSIGN}] 2024-12-17T12:39:08,870 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=79, ppid=78, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=49a464a6255681856b85f50432ce7984, UNASSIGN 2024-12-17T12:39:08,871 INFO [PEWorker-1 {}] 
assignment.RegionStateStore(202): pid=79 updating hbase:meta row=49a464a6255681856b85f50432ce7984, regionState=CLOSING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:08,873 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-17T12:39:08,873 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; CloseRegionProcedure 49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:39:08,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-17T12:39:09,025 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:09,026 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] handler.UnassignRegionHandler(124): Close 49a464a6255681856b85f50432ce7984 2024-12-17T12:39:09,026 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-17T12:39:09,027 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegion(1681): Closing 49a464a6255681856b85f50432ce7984, disabling compactions & flushes 2024-12-17T12:39:09,027 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:09,027 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 2024-12-17T12:39:09,027 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. after waiting 0 ms 2024-12-17T12:39:09,027 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
2024-12-17T12:39:09,027 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegion(2837): Flushing 49a464a6255681856b85f50432ce7984 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-17T12:39:09,028 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=A 2024-12-17T12:39:09,028 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:09,028 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=B 2024-12-17T12:39:09,028 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:09,028 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 49a464a6255681856b85f50432ce7984, store=C 2024-12-17T12:39:09,029 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:09,037 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/df6d137b3bc54654be6f371c622d185e is 50, key is test_row_0/A:col10/1734439146818/Put/seqid=0 2024-12-17T12:39:09,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742150_1326 (size=9857) 2024-12-17T12:39:09,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-17T12:39:09,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-17T12:39:09,443 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/df6d137b3bc54654be6f371c622d185e 2024-12-17T12:39:09,459 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/8cab110b9c4248fd89d42111adee5f6d is 50, key is test_row_0/B:col10/1734439146818/Put/seqid=0 2024-12-17T12:39:09,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742151_1327 (size=9857) 2024-12-17T12:39:09,865 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 
{event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/8cab110b9c4248fd89d42111adee5f6d 2024-12-17T12:39:09,878 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/6a1ed4ff4a9e409eb3d988019eda230a is 50, key is test_row_0/C:col10/1734439146818/Put/seqid=0 2024-12-17T12:39:09,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742152_1328 (size=9857) 2024-12-17T12:39:09,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-17T12:39:10,284 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/6a1ed4ff4a9e409eb3d988019eda230a 2024-12-17T12:39:10,295 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/A/df6d137b3bc54654be6f371c622d185e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/df6d137b3bc54654be6f371c622d185e 2024-12-17T12:39:10,300 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/df6d137b3bc54654be6f371c622d185e, entries=100, sequenceid=360, filesize=9.6 K 2024-12-17T12:39:10,301 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/B/8cab110b9c4248fd89d42111adee5f6d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8cab110b9c4248fd89d42111adee5f6d 2024-12-17T12:39:10,306 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8cab110b9c4248fd89d42111adee5f6d, entries=100, sequenceid=360, filesize=9.6 K 2024-12-17T12:39:10,306 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/.tmp/C/6a1ed4ff4a9e409eb3d988019eda230a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/6a1ed4ff4a9e409eb3d988019eda230a 2024-12-17T12:39:10,310 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/6a1ed4ff4a9e409eb3d988019eda230a, entries=100, sequenceid=360, filesize=9.6 K 2024-12-17T12:39:10,310 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 49a464a6255681856b85f50432ce7984 in 1283ms, sequenceid=360, compaction requested=true 2024-12-17T12:39:10,311 DEBUG [StoreCloser-TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/5cd31a3c660a4f0aafb8f848f5c6e7e8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/ad49a32f2a2947d99035999fd5d4ffdb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2d31e542e7464c38a0ad5bf388457528, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/7ca4a23ffa1e4930b7a301063f313bfa, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/c188df83cf804cd8b2584aaa40dfdf09, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/a343c1ac3ad5466dbd5b5461c66619bc, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/3a35f82678d344c6931b1473be1c80e8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/79bc3398f3384b6ca968c00e4800cd78, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/0871be2e44c94e1db2d46460e8c33ce9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1884618863f44588b6d2a6cfef9e2c26, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1d68b43afbe24077be932102174f9a7c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/f58bef4f935b4e38bdb0062f3b514412, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/dbc568b1494b45f1973bbb60042580a8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/93f43bdabcf849e98e895738b76f3ac6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/d971cf6ff2b34ed4a093ff5b5003584f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/a130ff694f5b4125b1836a1f995d47e2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2995483d7ea740b99ffdfad0f0d390e7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2ffad6c9405b40f2b7d1e680e1613f09, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/efe6574f79f148e3a40934bf5d4ea1aa, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/eadcd42f6186440fa166f269e95ec47b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/89f52191ab7e4c47983786d007da038d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1095a068fb834973a0f234670feae581, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/fd7e0aaad09b4105a02cb1b76c60562b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/aecd066447ad479d9c3e35a8034a2e81] to archive 2024-12-17T12:39:10,312 DEBUG [StoreCloser-TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
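The archiver lines that follow move each compacted store file from the table's data directory to the matching path under the cluster root's archive directory. A small sketch of that path convention as it appears in these log lines; this is illustration only, not the HBase HFileArchiver implementation, and the helper name is made up:

// Mirrors the layout visible in the log: <rootDir>/data/<ns>/<table>/<region>/<cf>/<hfile>
// is archived to <rootDir>/archive/data/<ns>/<table>/<region>/<cf>/<hfile>.
final class ArchivePathSketch {
    static String toArchiveLocation(String rootDir, String relativeStorePath) {
        return rootDir + "/archive/" + relativeStorePath;
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9";
        String rel = "data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/7ca4a23ffa1e4930b7a301063f313bfa";
        // Prints the same archive destination the HFileArchiver-7 line below reports.
        System.out.println(toArchiveLocation(root, rel));
    }
}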
2024-12-17T12:39:10,313 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/7ca4a23ffa1e4930b7a301063f313bfa to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/7ca4a23ffa1e4930b7a301063f313bfa 2024-12-17T12:39:10,314 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/5cd31a3c660a4f0aafb8f848f5c6e7e8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/5cd31a3c660a4f0aafb8f848f5c6e7e8 2024-12-17T12:39:10,314 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/ad49a32f2a2947d99035999fd5d4ffdb to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/ad49a32f2a2947d99035999fd5d4ffdb 2024-12-17T12:39:10,314 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2d31e542e7464c38a0ad5bf388457528 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2d31e542e7464c38a0ad5bf388457528 2024-12-17T12:39:10,314 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/3a35f82678d344c6931b1473be1c80e8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/3a35f82678d344c6931b1473be1c80e8 2024-12-17T12:39:10,314 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/a343c1ac3ad5466dbd5b5461c66619bc to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/a343c1ac3ad5466dbd5b5461c66619bc 2024-12-17T12:39:10,314 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/c188df83cf804cd8b2584aaa40dfdf09 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/c188df83cf804cd8b2584aaa40dfdf09 2024-12-17T12:39:10,314 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/79bc3398f3384b6ca968c00e4800cd78 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/79bc3398f3384b6ca968c00e4800cd78 2024-12-17T12:39:10,315 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/f58bef4f935b4e38bdb0062f3b514412 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/f58bef4f935b4e38bdb0062f3b514412 2024-12-17T12:39:10,315 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1d68b43afbe24077be932102174f9a7c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1d68b43afbe24077be932102174f9a7c 2024-12-17T12:39:10,315 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1884618863f44588b6d2a6cfef9e2c26 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1884618863f44588b6d2a6cfef9e2c26 2024-12-17T12:39:10,315 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/0871be2e44c94e1db2d46460e8c33ce9 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/0871be2e44c94e1db2d46460e8c33ce9 2024-12-17T12:39:10,315 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/d971cf6ff2b34ed4a093ff5b5003584f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/d971cf6ff2b34ed4a093ff5b5003584f 2024-12-17T12:39:10,315 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/93f43bdabcf849e98e895738b76f3ac6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/93f43bdabcf849e98e895738b76f3ac6 2024-12-17T12:39:10,316 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/a130ff694f5b4125b1836a1f995d47e2 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/a130ff694f5b4125b1836a1f995d47e2 2024-12-17T12:39:10,316 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/dbc568b1494b45f1973bbb60042580a8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/dbc568b1494b45f1973bbb60042580a8 2024-12-17T12:39:10,316 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/efe6574f79f148e3a40934bf5d4ea1aa to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/efe6574f79f148e3a40934bf5d4ea1aa 2024-12-17T12:39:10,317 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2ffad6c9405b40f2b7d1e680e1613f09 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2ffad6c9405b40f2b7d1e680e1613f09 2024-12-17T12:39:10,317 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2995483d7ea740b99ffdfad0f0d390e7 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/2995483d7ea740b99ffdfad0f0d390e7 2024-12-17T12:39:10,317 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/eadcd42f6186440fa166f269e95ec47b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/eadcd42f6186440fa166f269e95ec47b 2024-12-17T12:39:10,317 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/89f52191ab7e4c47983786d007da038d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/89f52191ab7e4c47983786d007da038d 2024-12-17T12:39:10,317 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1095a068fb834973a0f234670feae581 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/1095a068fb834973a0f234670feae581 2024-12-17T12:39:10,317 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/aecd066447ad479d9c3e35a8034a2e81 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/aecd066447ad479d9c3e35a8034a2e81 2024-12-17T12:39:10,317 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/fd7e0aaad09b4105a02cb1b76c60562b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/fd7e0aaad09b4105a02cb1b76c60562b 2024-12-17T12:39:10,318 DEBUG [StoreCloser-TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/d9d4db24b2314cb8964f2ea8d1c0ab25, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/b477db99b9c5475fa709d9c1291e6391, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/6229fbeb51eb4275b4e7f73e20e2494a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/13b6408319b24f0a9ae6d27db53d246c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/510998c3a05743e2a6ddfa67bc44231c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/9cb43f670c5542e59b2d9ce8712c6892, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/7821ab9463f748d98f5072cd4c44b924, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0e85790e318d49af9219b9699cf7177d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/1c6641476d9d47e7ab7a2a705c4d7f25, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/10979d10a72f4cf5bb6fedb469812f86, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/5b500d888cb94cada93cc8aa44bb3565, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8088552e369a4c538c3823fadf232420, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/682be2111e684da9a66be288581e025b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8fc3f86438ac4bf5a34ef98548876232, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/bc42097744664d23bbe9965e01fa7bf4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/c57bd66151594431b4befe5155086ec5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0637a1377cba4fcf8df859dd346ef2ac, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/144b9877ed70448283882e7d3adccb7b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/705f06fca91b42f2ae43e5a3a7fb0c38, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/60416615522a4f9b8f360b7f4396b5ca, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/28304a4e5bd8450e9af24d45db360651, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/519c5b946163419bb9a18d2ab8245369, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/ff3d959083764213aba22ef0b9f25928, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/93846ba77197410aaa08e1c8cd7ffe72] to archive 2024-12-17T12:39:10,319 DEBUG [StoreCloser-TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-17T12:39:10,320 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/b477db99b9c5475fa709d9c1291e6391 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/b477db99b9c5475fa709d9c1291e6391 2024-12-17T12:39:10,320 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/d9d4db24b2314cb8964f2ea8d1c0ab25 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/d9d4db24b2314cb8964f2ea8d1c0ab25 2024-12-17T12:39:10,320 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/7821ab9463f748d98f5072cd4c44b924 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/7821ab9463f748d98f5072cd4c44b924 2024-12-17T12:39:10,320 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/6229fbeb51eb4275b4e7f73e20e2494a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/6229fbeb51eb4275b4e7f73e20e2494a 2024-12-17T12:39:10,320 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/13b6408319b24f0a9ae6d27db53d246c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/13b6408319b24f0a9ae6d27db53d246c 2024-12-17T12:39:10,321 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0e85790e318d49af9219b9699cf7177d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0e85790e318d49af9219b9699cf7177d 2024-12-17T12:39:10,321 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/9cb43f670c5542e59b2d9ce8712c6892 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/9cb43f670c5542e59b2d9ce8712c6892 2024-12-17T12:39:10,321 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/510998c3a05743e2a6ddfa67bc44231c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/510998c3a05743e2a6ddfa67bc44231c 2024-12-17T12:39:10,321 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/1c6641476d9d47e7ab7a2a705c4d7f25 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/1c6641476d9d47e7ab7a2a705c4d7f25 2024-12-17T12:39:10,322 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8088552e369a4c538c3823fadf232420 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8088552e369a4c538c3823fadf232420 2024-12-17T12:39:10,322 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/10979d10a72f4cf5bb6fedb469812f86 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/10979d10a72f4cf5bb6fedb469812f86 2024-12-17T12:39:10,322 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/5b500d888cb94cada93cc8aa44bb3565 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/5b500d888cb94cada93cc8aa44bb3565 2024-12-17T12:39:10,322 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8fc3f86438ac4bf5a34ef98548876232 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8fc3f86438ac4bf5a34ef98548876232 2024-12-17T12:39:10,322 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/bc42097744664d23bbe9965e01fa7bf4 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/bc42097744664d23bbe9965e01fa7bf4 2024-12-17T12:39:10,323 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/682be2111e684da9a66be288581e025b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/682be2111e684da9a66be288581e025b 2024-12-17T12:39:10,323 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/c57bd66151594431b4befe5155086ec5 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/c57bd66151594431b4befe5155086ec5 2024-12-17T12:39:10,323 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0637a1377cba4fcf8df859dd346ef2ac to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/0637a1377cba4fcf8df859dd346ef2ac 2024-12-17T12:39:10,324 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/144b9877ed70448283882e7d3adccb7b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/144b9877ed70448283882e7d3adccb7b 2024-12-17T12:39:10,324 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/60416615522a4f9b8f360b7f4396b5ca to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/60416615522a4f9b8f360b7f4396b5ca 2024-12-17T12:39:10,324 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/705f06fca91b42f2ae43e5a3a7fb0c38 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/705f06fca91b42f2ae43e5a3a7fb0c38 2024-12-17T12:39:10,324 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/519c5b946163419bb9a18d2ab8245369 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/519c5b946163419bb9a18d2ab8245369 2024-12-17T12:39:10,324 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/28304a4e5bd8450e9af24d45db360651 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/28304a4e5bd8450e9af24d45db360651 2024-12-17T12:39:10,324 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/ff3d959083764213aba22ef0b9f25928 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/ff3d959083764213aba22ef0b9f25928 2024-12-17T12:39:10,324 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/93846ba77197410aaa08e1c8cd7ffe72 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/93846ba77197410aaa08e1c8cd7ffe72 2024-12-17T12:39:10,325 DEBUG [StoreCloser-TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/95eebe543b14412685a92aeac5787a4e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/bb72041219cd4e6a9c1d44ed078a8233, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/1f1199763c634d36885e8853a89f2366, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/9906bd394c1a49929023dc286d5877e6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/7820923e94c8497bb2d11e679d162f91, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/2f82e69a811a4211ab1ec06115470215, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a46096929c384d82b84eb17d98dbc4d5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/e3e15aaabfed45bc952d9d83bf4a12d6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/228fa8efcecf4608b64d6397d95eb269, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/73417b57c2cf4fa1b95dfc4a7234e55b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/cba9583a6af84ccfbc2f011dcebfaa1a, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/aefd822aeffa410bba047b8ec9546387, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/821383d7f6344cc786d7d9531ac527fe, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/6dddb5c66b844d0db16c3faf5652a17f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/60200b8fb5e34b4eb9ff41f28b7a2cfc, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/aa9df7d56f4647558473e7a7838cac79, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/d9f4f9c85240472496f49bf8644826ff, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/43eb1de6ef7142afaca7b930135d9ec7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/0d4afa724a9a464fbc65c02c755ed8b5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/2d7aedec2ec849aca84d511d66d8c0ed, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a079255bc0cf4a2485a88629d958a8db, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a3432777231a4017a41eb1ce01ef1210, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/f273fa6708cc42f4b9c0abc2a3f33c5e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/c490327075ed490e90096613d181e375] to archive 2024-12-17T12:39:10,325 DEBUG [StoreCloser-TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-17T12:39:10,327 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/9906bd394c1a49929023dc286d5877e6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/9906bd394c1a49929023dc286d5877e6 2024-12-17T12:39:10,327 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/2f82e69a811a4211ab1ec06115470215 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/2f82e69a811a4211ab1ec06115470215 2024-12-17T12:39:10,327 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/1f1199763c634d36885e8853a89f2366 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/1f1199763c634d36885e8853a89f2366 2024-12-17T12:39:10,327 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/bb72041219cd4e6a9c1d44ed078a8233 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/bb72041219cd4e6a9c1d44ed078a8233 2024-12-17T12:39:10,327 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/95eebe543b14412685a92aeac5787a4e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/95eebe543b14412685a92aeac5787a4e 2024-12-17T12:39:10,327 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/7820923e94c8497bb2d11e679d162f91 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/7820923e94c8497bb2d11e679d162f91 2024-12-17T12:39:10,328 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a46096929c384d82b84eb17d98dbc4d5 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a46096929c384d82b84eb17d98dbc4d5 2024-12-17T12:39:10,328 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/e3e15aaabfed45bc952d9d83bf4a12d6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/e3e15aaabfed45bc952d9d83bf4a12d6 2024-12-17T12:39:10,329 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/228fa8efcecf4608b64d6397d95eb269 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/228fa8efcecf4608b64d6397d95eb269 2024-12-17T12:39:10,329 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/821383d7f6344cc786d7d9531ac527fe to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/821383d7f6344cc786d7d9531ac527fe 2024-12-17T12:39:10,329 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/73417b57c2cf4fa1b95dfc4a7234e55b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/73417b57c2cf4fa1b95dfc4a7234e55b 2024-12-17T12:39:10,329 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/cba9583a6af84ccfbc2f011dcebfaa1a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/cba9583a6af84ccfbc2f011dcebfaa1a 2024-12-17T12:39:10,329 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/aefd822aeffa410bba047b8ec9546387 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/aefd822aeffa410bba047b8ec9546387 2024-12-17T12:39:10,329 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/6dddb5c66b844d0db16c3faf5652a17f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/6dddb5c66b844d0db16c3faf5652a17f 2024-12-17T12:39:10,329 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/aa9df7d56f4647558473e7a7838cac79 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/aa9df7d56f4647558473e7a7838cac79 2024-12-17T12:39:10,330 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/60200b8fb5e34b4eb9ff41f28b7a2cfc to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/60200b8fb5e34b4eb9ff41f28b7a2cfc 2024-12-17T12:39:10,330 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/d9f4f9c85240472496f49bf8644826ff to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/d9f4f9c85240472496f49bf8644826ff 2024-12-17T12:39:10,331 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/0d4afa724a9a464fbc65c02c755ed8b5 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/0d4afa724a9a464fbc65c02c755ed8b5 2024-12-17T12:39:10,331 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/43eb1de6ef7142afaca7b930135d9ec7 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/43eb1de6ef7142afaca7b930135d9ec7 2024-12-17T12:39:10,331 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a079255bc0cf4a2485a88629d958a8db to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a079255bc0cf4a2485a88629d958a8db 2024-12-17T12:39:10,331 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/2d7aedec2ec849aca84d511d66d8c0ed to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/2d7aedec2ec849aca84d511d66d8c0ed 2024-12-17T12:39:10,331 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a3432777231a4017a41eb1ce01ef1210 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/a3432777231a4017a41eb1ce01ef1210 2024-12-17T12:39:10,331 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/f273fa6708cc42f4b9c0abc2a3f33c5e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/f273fa6708cc42f4b9c0abc2a3f33c5e 2024-12-17T12:39:10,331 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/c490327075ed490e90096613d181e375 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/c490327075ed490e90096613d181e375 2024-12-17T12:39:10,335 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/recovered.edits/363.seqid, newMaxSeqId=363, maxSeqId=1 2024-12-17T12:39:10,335 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984. 
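Note on the archive step logged above: each store file is moved from the region's data directory to a mirrored path under the cluster's archive directory rather than deleted outright. The following Java sketch is illustrative only (not the HFileArchiver implementation); it shows how such a mirrored archive path can be derived and the file moved with the Hadoop FileSystem API. The root and store-file paths are copied from the log, and the helper name archivePathFor is made up for this example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveSketch {
  // Hypothetical helper: mirror <root>/data/... to <root>/archive/data/...
  static Path archivePathFor(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1); // strip "<root>/"
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path root = new Path("hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9");
    Path storeFile = new Path(root,
        "data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/e3e15aaabfed45bc952d9d83bf4a12d6");
    FileSystem fs = root.getFileSystem(conf);
    Path target = archivePathFor(root, storeFile);
    fs.mkdirs(target.getParent());                 // make sure the archive directory exists
    boolean moved = fs.rename(storeFile, target);  // move into the archive instead of deleting
    System.out.println("archived=" + moved + " -> " + target);
  }
}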
2024-12-17T12:39:10,335 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] regionserver.HRegion(1635): Region close journal for 49a464a6255681856b85f50432ce7984: 2024-12-17T12:39:10,336 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=80}] handler.UnassignRegionHandler(170): Closed 49a464a6255681856b85f50432ce7984 2024-12-17T12:39:10,337 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=79 updating hbase:meta row=49a464a6255681856b85f50432ce7984, regionState=CLOSED 2024-12-17T12:39:10,339 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-17T12:39:10,339 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; CloseRegionProcedure 49a464a6255681856b85f50432ce7984, server=681c08bfdbdf,36491,1734439058372 in 1.4640 sec 2024-12-17T12:39:10,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=78 2024-12-17T12:39:10,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=78, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=49a464a6255681856b85f50432ce7984, UNASSIGN in 1.4700 sec 2024-12-17T12:39:10,341 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-17T12:39:10,341 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4730 sec 2024-12-17T12:39:10,342 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439150342"}]},"ts":"1734439150342"} 2024-12-17T12:39:10,343 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-17T12:39:10,382 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-17T12:39:10,383 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5650 sec 2024-12-17T12:39:10,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-17T12:39:10,931 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-17T12:39:10,932 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-17T12:39:10,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:10,937 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=81, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:10,938 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=81, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:10,938 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-17T12:39:10,940 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984 2024-12-17T12:39:10,944 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/recovered.edits] 2024-12-17T12:39:10,949 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/4073c95da9eb43c5aa231ebd07563947 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/4073c95da9eb43c5aa231ebd07563947 2024-12-17T12:39:10,949 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/17b7526aa87b4f53a1af1e1eda33f764 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/17b7526aa87b4f53a1af1e1eda33f764 2024-12-17T12:39:10,949 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/df6d137b3bc54654be6f371c622d185e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/A/df6d137b3bc54654be6f371c622d185e 2024-12-17T12:39:10,954 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/034d931ab304483f8236608372e3034b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/034d931ab304483f8236608372e3034b 2024-12-17T12:39:10,954 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/1dddf20d176643f688122ced0b797148 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/1dddf20d176643f688122ced0b797148 
2024-12-17T12:39:10,954 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8cab110b9c4248fd89d42111adee5f6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/B/8cab110b9c4248fd89d42111adee5f6d 2024-12-17T12:39:10,959 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/b71a317a38e14161a23b9fb778453324 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/b71a317a38e14161a23b9fb778453324 2024-12-17T12:39:10,959 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/6a1ed4ff4a9e409eb3d988019eda230a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/6a1ed4ff4a9e409eb3d988019eda230a 2024-12-17T12:39:10,959 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/e8901a24558e4e25a1b3b35d3656f828 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/C/e8901a24558e4e25a1b3b35d3656f828 2024-12-17T12:39:10,964 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/recovered.edits/363.seqid to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984/recovered.edits/363.seqid 2024-12-17T12:39:10,965 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/49a464a6255681856b85f50432ce7984 2024-12-17T12:39:10,965 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-17T12:39:10,968 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=81, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:10,974 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-17T12:39:10,976 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-17T12:39:10,977 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=81, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:10,977 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-17T12:39:10,977 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734439150977"}]},"ts":"9223372036854775807"} 2024-12-17T12:39:10,979 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-17T12:39:10,979 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 49a464a6255681856b85f50432ce7984, NAME => 'TestAcidGuarantees,,1734439124130.49a464a6255681856b85f50432ce7984.', STARTKEY => '', ENDKEY => ''}] 2024-12-17T12:39:10,979 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-17T12:39:10,979 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734439150979"}]},"ts":"9223372036854775807"} 2024-12-17T12:39:10,980 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-17T12:39:11,016 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=81, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:11,018 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 84 msec 2024-12-17T12:39:11,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-17T12:39:11,040 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-17T12:39:11,054 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=245 (was 249), OpenFileDescriptor=447 (was 460), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=341 (was 270) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3750 (was 3802) 2024-12-17T12:39:11,063 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=245, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=341, ProcessCount=11, AvailableMemoryMB=3750 2024-12-17T12:39:11,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
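The DISABLE (procId 77) and DELETE (procId 81) operations completed above are the master-side procedures behind the client calls the test issues between iterations. A minimal client-side sketch of the same sequence with the public Admin API might look like the following; the connection setup is assumed and only the table name is taken from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // DisableTableProcedure; blocks until the table is DISABLED
        }
        admin.deleteTable(table);      // DeleteTableProcedure: archive regions, clean up hbase:meta
      }
    }
  }
}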
2024-12-17T12:39:11,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-17T12:39:11,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=82, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:11,067 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=82, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-17T12:39:11,067 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:11,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 82 2024-12-17T12:39:11,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=82 2024-12-17T12:39:11,068 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=82, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-17T12:39:11,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742153_1329 (size=963) 2024-12-17T12:39:11,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=82 2024-12-17T12:39:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=82 2024-12-17T12:39:11,481 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9 2024-12-17T12:39:11,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742154_1330 (size=53) 2024-12-17T12:39:11,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=82 2024-12-17T12:39:11,893 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:39:11,894 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a862a378d488b0f0d03cbf84efe058da, disabling compactions & flushes 2024-12-17T12:39:11,894 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:11,894 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:11,894 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. after waiting 0 ms 2024-12-17T12:39:11,894 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:11,894 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
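The table descriptor logged for the create above (families A, B and C, one version each, ADAPTIVE in-memory compaction, 128 KB flush size) can be built programmatically. The sketch below uses the HBase 2.x descriptor builders and sets only the attributes visible in the log; everything else is left at defaults, and it is a reconstruction rather than the test's actual setup code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  static TableDescriptor descriptor() {
    TableDescriptorBuilder tdb = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // same metadata key as in the log; selects the adaptive compacting memstore
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
        // 128 KB flush size, as in the test (hence the TableDescriptorChecker warning)
        .setMemStoreFlushSize(131072);
    for (String family : new String[] {"A", "B", "C"}) {
      ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)   // VERSIONS => '1'
          .build();
      tdb.setColumnFamily(cfd);
    }
    return tdb.build();
  }

  static void create(Admin admin) throws java.io.IOException {
    admin.createTable(descriptor());  // runs CreateTableProcedure on the master
  }
}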
2024-12-17T12:39:11,894 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:11,897 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=82, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-17T12:39:11,897 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734439151897"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734439151897"}]},"ts":"1734439151897"} 2024-12-17T12:39:11,900 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-17T12:39:11,901 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=82, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-17T12:39:11,901 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439151901"}]},"ts":"1734439151901"} 2024-12-17T12:39:11,902 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-17T12:39:11,924 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a862a378d488b0f0d03cbf84efe058da, ASSIGN}] 2024-12-17T12:39:11,926 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=82, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a862a378d488b0f0d03cbf84efe058da, ASSIGN 2024-12-17T12:39:11,927 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=82, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a862a378d488b0f0d03cbf84efe058da, ASSIGN; state=OFFLINE, location=681c08bfdbdf,36491,1734439058372; forceNewPlan=false, retain=false 2024-12-17T12:39:12,078 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=a862a378d488b0f0d03cbf84efe058da, regionState=OPENING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:12,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; OpenRegionProcedure a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:39:12,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=82 2024-12-17T12:39:12,233 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:12,238 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
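The hbase:meta puts above record the new region's regioninfo and state, and the ASSIGN procedure then places it on a region server. From a client, the resulting assignment can be read back through a RegionLocator; a small, hedged sketch (assuming an open Connection) follows.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  static void printLocations(Connection conn) throws java.io.IOException {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // e.g. encoded name a862a378d488b0f0d03cbf84efe058da on 681c08bfdbdf,36491,...
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}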
2024-12-17T12:39:12,238 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7285): Opening region: {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:39:12,239 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:12,239 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:39:12,239 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7327): checking encryption for a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:12,239 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7330): checking classloading for a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:12,241 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:12,243 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:39:12,243 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a862a378d488b0f0d03cbf84efe058da columnFamilyName A 2024-12-17T12:39:12,243 DEBUG [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:12,244 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(327): Store=a862a378d488b0f0d03cbf84efe058da/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:39:12,244 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:12,245 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:39:12,245 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a862a378d488b0f0d03cbf84efe058da columnFamilyName B 2024-12-17T12:39:12,245 DEBUG [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:12,245 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(327): Store=a862a378d488b0f0d03cbf84efe058da/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:39:12,245 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:12,246 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:39:12,246 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a862a378d488b0f0d03cbf84efe058da columnFamilyName C 2024-12-17T12:39:12,246 DEBUG [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:12,246 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(327): Store=a862a378d488b0f0d03cbf84efe058da/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:39:12,246 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:12,247 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:12,247 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:12,248 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-17T12:39:12,249 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1085): writing seq id for a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:12,251 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-17T12:39:12,251 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1102): Opened a862a378d488b0f0d03cbf84efe058da; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75341498, jitterRate=0.1226758062839508}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-17T12:39:12,251 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1001): Region open journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:12,252 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da., pid=84, masterSystemTime=1734439152233 2024-12-17T12:39:12,253 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:12,253 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
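The CompactionConfiguration lines above echo the store-level compaction tuning in effect for each family (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0). These correspond to the standard HBase configuration keys; the sketch below sets the same values programmatically on a Configuration, with the values copied from the log. It is an illustration of the mapping, not a claim about how this cluster was configured.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);               // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);              // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);        // size ratio for minor compactions
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);// off-peak ratio
    return conf;
  }
}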
2024-12-17T12:39:12,253 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=a862a378d488b0f0d03cbf84efe058da, regionState=OPEN, openSeqNum=2, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:12,255 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-17T12:39:12,255 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; OpenRegionProcedure a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 in 174 msec 2024-12-17T12:39:12,256 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=83, resume processing ppid=82 2024-12-17T12:39:12,257 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=82, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a862a378d488b0f0d03cbf84efe058da, ASSIGN in 331 msec 2024-12-17T12:39:12,257 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=82, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-17T12:39:12,257 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439152257"}]},"ts":"1734439152257"} 2024-12-17T12:39:12,258 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-17T12:39:12,266 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=82, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-17T12:39:12,267 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2010 sec 2024-12-17T12:39:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=82 2024-12-17T12:39:13,179 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 82 completed 2024-12-17T12:39:13,182 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x618c6804 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4da7d358 2024-12-17T12:39:13,226 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ba86bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:13,229 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:13,231 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52920, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:13,232 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-17T12:39:13,233 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33454, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-17T12:39:13,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-17T12:39:13,235 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-17T12:39:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:13,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742155_1331 (size=999) 2024-12-17T12:39:13,649 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-17T12:39:13,649 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-17T12:39:13,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-17T12:39:13,660 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a862a378d488b0f0d03cbf84efe058da, REOPEN/MOVE}] 2024-12-17T12:39:13,661 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a862a378d488b0f0d03cbf84efe058da, REOPEN/MOVE 2024-12-17T12:39:13,662 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=87 updating hbase:meta row=a862a378d488b0f0d03cbf84efe058da, regionState=CLOSING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:13,663 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-17T12:39:13,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; CloseRegionProcedure a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:39:13,815 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:13,816 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] handler.UnassignRegionHandler(124): Close a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:13,816 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-17T12:39:13,816 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1681): Closing a862a378d488b0f0d03cbf84efe058da, disabling compactions & flushes 2024-12-17T12:39:13,816 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:13,817 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:13,817 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. after waiting 0 ms 2024-12-17T12:39:13,817 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
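The ModifyTableProcedure above switches family A to MOB storage (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is why the region is closed and reopened in the entries that follow. A hedged client-side sketch of the same change with the Admin API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobSketch {
  static void enableMobOnA(Admin admin) throws java.io.IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(table);
    ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
        .setMobEnabled(true)    // IS_MOB => 'true'
        .setMobThreshold(4L)    // cells larger than 4 bytes are written to MOB files
        .build();
    // triggers ModifyTableProcedure and the ReopenTableRegionsProcedure seen in the log
    admin.modifyColumnFamily(table, mobA);
  }
}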
2024-12-17T12:39:13,825 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-17T12:39:13,826 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:13,826 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1635): Region close journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:13,826 WARN [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegionServer(3786): Not adding moved region record: a862a378d488b0f0d03cbf84efe058da to self. 2024-12-17T12:39:13,828 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] handler.UnassignRegionHandler(170): Closed a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:13,828 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=87 updating hbase:meta row=a862a378d488b0f0d03cbf84efe058da, regionState=CLOSED 2024-12-17T12:39:13,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-17T12:39:13,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; CloseRegionProcedure a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 in 166 msec 2024-12-17T12:39:13,832 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a862a378d488b0f0d03cbf84efe058da, REOPEN/MOVE; state=CLOSED, location=681c08bfdbdf,36491,1734439058372; forceNewPlan=false, retain=true 2024-12-17T12:39:13,982 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=87 updating hbase:meta row=a862a378d488b0f0d03cbf84efe058da, regionState=OPENING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:13,985 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=89, ppid=87, state=RUNNABLE; OpenRegionProcedure a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:39:14,139 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,146 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:14,147 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegion(7285): Opening region: {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:39:14,148 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:14,148 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:39:14,149 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegion(7327): checking encryption for a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:14,149 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegion(7330): checking classloading for a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:14,152 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:14,153 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:39:14,154 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a862a378d488b0f0d03cbf84efe058da columnFamilyName A 2024-12-17T12:39:14,156 DEBUG [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:14,157 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(327): Store=a862a378d488b0f0d03cbf84efe058da/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:39:14,157 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:14,158 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:39:14,158 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a862a378d488b0f0d03cbf84efe058da columnFamilyName B 2024-12-17T12:39:14,159 DEBUG [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:14,159 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(327): Store=a862a378d488b0f0d03cbf84efe058da/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:39:14,159 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:14,160 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:39:14,161 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a862a378d488b0f0d03cbf84efe058da columnFamilyName C 2024-12-17T12:39:14,161 DEBUG [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:14,161 INFO [StoreOpener-a862a378d488b0f0d03cbf84efe058da-1 {}] regionserver.HStore(327): Store=a862a378d488b0f0d03cbf84efe058da/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:39:14,162 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:14,163 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:14,164 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:14,167 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-17T12:39:14,168 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegion(1085): writing seq id for a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:14,170 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegion(1102): Opened a862a378d488b0f0d03cbf84efe058da; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73696240, jitterRate=0.0981595516204834}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-17T12:39:14,170 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegion(1001): Region open journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:14,171 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da., pid=89, masterSystemTime=1734439154139 2024-12-17T12:39:14,173 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=87 updating hbase:meta row=a862a378d488b0f0d03cbf84efe058da, regionState=OPEN, openSeqNum=5, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,174 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:14,174 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=89}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
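Once the reopen completes, the burst of ReadOnlyZKClient "Connect 0x... to 127.0.0.1:59557" entries that follows comes from the test handing each concurrent reader/writer its own HBase connection, each with its own ZooKeeper watcher and RPC client. A minimal sketch of opening several independent connections against the same quorum is shown below; the quorum address and port come from the log, while the structure and method name openClients are illustrative rather than the test's actual code.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientPoolSketch {
  static List<Connection> openClients(int n) throws java.io.IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 59557);
    List<Connection> clients = new ArrayList<>();
    for (int i = 0; i < n; i++) {
      // each createConnection() establishes its own ZK session and RPC client,
      // which corresponds to one "Connect 0x..." line per client in the log
      clients.add(ConnectionFactory.createConnection(conf));
    }
    return clients;
  }
}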
2024-12-17T12:39:14,176 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=89, resume processing ppid=87 2024-12-17T12:39:14,176 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, ppid=87, state=SUCCESS; OpenRegionProcedure a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 in 190 msec 2024-12-17T12:39:14,178 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=87, resume processing ppid=86 2024-12-17T12:39:14,178 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a862a378d488b0f0d03cbf84efe058da, REOPEN/MOVE in 517 msec 2024-12-17T12:39:14,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-17T12:39:14,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 525 msec 2024-12-17T12:39:14,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 946 msec 2024-12-17T12:39:14,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-17T12:39:14,185 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41aa6461 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3264421f 2024-12-17T12:39:14,241 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25bbd89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:14,242 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x748292ad to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64665298 2024-12-17T12:39:14,250 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b0e0c68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:14,251 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76165592 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@20322b5a 2024-12-17T12:39:14,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b140e78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:14,258 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3440b1b1 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5993498 2024-12-17T12:39:14,266 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e359d9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:14,266 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7976087b to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3587ce39 2024-12-17T12:39:14,274 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@573c7461, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:14,275 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a0732aa to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3eacd0f7 2024-12-17T12:39:14,283 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47a39715, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:14,284 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x23ba8092 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7bc9c77a 2024-12-17T12:39:14,291 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2089ec29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:14,293 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c826820 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@29458edd 2024-12-17T12:39:14,300 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46c2c778, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:14,301 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cae6c5c to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79982672 2024-12-17T12:39:14,308 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2931c73e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:14,309 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x433e2b26 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client 
config=org.apache.zookeeper.client.ZKClientConfig@7bad2e85 2024-12-17T12:39:14,316 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c820ef9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:14,321 DEBUG [hconnection-0xba68477-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:14,321 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:14,321 DEBUG [hconnection-0x2d79ea1f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:14,321 DEBUG [hconnection-0x9f3985b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:14,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=90, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=90, table=TestAcidGuarantees 2024-12-17T12:39:14,323 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52960, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:14,323 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52944, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:14,323 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52930, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:14,323 DEBUG [hconnection-0x5b7d5ac4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:14,323 DEBUG [hconnection-0x31bef7b7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:14,323 DEBUG [hconnection-0x67073526-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:14,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-17T12:39:14,325 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52980, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:14,325 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:14,325 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=90, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=90, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:14,325 DEBUG [hconnection-0x18825739-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:14,325 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52970, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:14,326 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52998, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:14,327 DEBUG [hconnection-0x43a5bf7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:14,327 DEBUG [hconnection-0x12270ebb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:14,328 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=90, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=90, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:14,328 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:14,328 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:14,328 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53000, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:14,329 DEBUG [hconnection-0x3893b784-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:14,330 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:14,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:14,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-17T12:39:14,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:14,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:14,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:14,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:14,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:14,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:14,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439214346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439214347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439214347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439214347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439214348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412176a8442d0787146d2bfb9288c5b088ca2_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439154330/Put/seqid=0 2024-12-17T12:39:14,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742156_1332 (size=12154) 2024-12-17T12:39:14,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-17T12:39:14,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439214450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439214451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439214451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439214452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439214452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,480 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:14,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:14,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:14,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:14,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:14,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:14,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:14,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-17T12:39:14,632 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:14,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:14,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:14,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:14,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:14,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:14,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:14,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439214652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439214652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439214654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439214654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439214654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,760 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:14,763 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412176a8442d0787146d2bfb9288c5b088ca2_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412176a8442d0787146d2bfb9288c5b088ca2_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:14,763 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/79ab10f2f5f941d89bbec4163ca59353, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:14,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/79ab10f2f5f941d89bbec4163ca59353 is 175, key is test_row_0/A:col10/1734439154330/Put/seqid=0 2024-12-17T12:39:14,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742157_1333 (size=30955) 2024-12-17T12:39:14,784 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:14,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:14,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
as already flushing 2024-12-17T12:39:14,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:14,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:14,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:14,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:14,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-17T12:39:14,936 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:14,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:14,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:14,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:14,937 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:14,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:14,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:14,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439214955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439214956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439214957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439214957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:14,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:14,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439214957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,088 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:15,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:15,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,167 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/79ab10f2f5f941d89bbec4163ca59353 2024-12-17T12:39:15,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/9c57b6e5d6004733a9f98cf898df0715 is 50, key is test_row_0/B:col10/1734439154330/Put/seqid=0 2024-12-17T12:39:15,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742158_1334 (size=12001) 2024-12-17T12:39:15,241 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:15,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:15,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,393 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:15,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:15,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-17T12:39:15,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:15,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439215461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:15,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439215461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:15,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439215462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:15,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439215464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:15,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439215464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,545 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:15,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:15,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,590 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/9c57b6e5d6004733a9f98cf898df0715 2024-12-17T12:39:15,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/79596c79a17f4257805d167097862948 is 50, key is test_row_0/C:col10/1734439154330/Put/seqid=0 2024-12-17T12:39:15,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742159_1335 (size=12001) 2024-12-17T12:39:15,698 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:15,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:15,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,699 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,850 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:15,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:15,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:15,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:15,855 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-17T12:39:16,002 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:16,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:16,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:16,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:16,003 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:16,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:16,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:16,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/79596c79a17f4257805d167097862948 2024-12-17T12:39:16,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/79ab10f2f5f941d89bbec4163ca59353 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/79ab10f2f5f941d89bbec4163ca59353 2024-12-17T12:39:16,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/79ab10f2f5f941d89bbec4163ca59353, entries=150, sequenceid=17, filesize=30.2 K 2024-12-17T12:39:16,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/9c57b6e5d6004733a9f98cf898df0715 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9c57b6e5d6004733a9f98cf898df0715 2024-12-17T12:39:16,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9c57b6e5d6004733a9f98cf898df0715, entries=150, sequenceid=17, filesize=11.7 K 2024-12-17T12:39:16,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/79596c79a17f4257805d167097862948 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/79596c79a17f4257805d167097862948 2024-12-17T12:39:16,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/79596c79a17f4257805d167097862948, entries=150, sequenceid=17, filesize=11.7 K 2024-12-17T12:39:16,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a862a378d488b0f0d03cbf84efe058da in 1695ms, sequenceid=17, compaction requested=false 2024-12-17T12:39:16,029 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-17T12:39:16,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:16,155 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,155 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-17T12:39:16,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:16,155 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:39:16,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:16,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:16,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:16,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:16,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:16,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:16,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217b52602a748f447d6a09a314e157e1dba_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439154341/Put/seqid=0 2024-12-17T12:39:16,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742160_1336 (size=12154) 2024-12-17T12:39:16,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-17T12:39:16,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:16,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:16,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439216474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439216475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439216477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439216477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439216478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:16,567 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217b52602a748f447d6a09a314e157e1dba_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217b52602a748f447d6a09a314e157e1dba_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:16,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/f9724ccdc7b044529644451643efdf49, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:16,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/f9724ccdc7b044529644451643efdf49 is 175, key is test_row_0/A:col10/1734439154341/Put/seqid=0 2024-12-17T12:39:16,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742161_1337 (size=30955) 2024-12-17T12:39:16,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439216582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439216582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439216582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439216582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439216584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439216785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439216786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439216786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439216787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:16,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439216791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:16,971 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/f9724ccdc7b044529644451643efdf49 2024-12-17T12:39:16,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/9a662925d596433db10453a74156cabd is 50, key is test_row_0/B:col10/1734439154341/Put/seqid=0 2024-12-17T12:39:16,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742162_1338 (size=12001) 2024-12-17T12:39:17,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:17,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439217090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:17,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:17,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439217090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:17,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:17,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439217091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:17,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:17,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439217091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:17,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:17,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439217093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:17,380 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/9a662925d596433db10453a74156cabd 2024-12-17T12:39:17,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/cbd6805c8742495eabfd2164269ad127 is 50, key is test_row_0/C:col10/1734439154341/Put/seqid=0 2024-12-17T12:39:17,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742163_1339 (size=12001) 2024-12-17T12:39:17,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:17,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439217598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:17,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:17,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439217598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:17,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:17,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439217598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:17,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:17,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439217599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:17,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:17,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439217601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:17,794 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/cbd6805c8742495eabfd2164269ad127 2024-12-17T12:39:17,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/f9724ccdc7b044529644451643efdf49 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/f9724ccdc7b044529644451643efdf49 2024-12-17T12:39:17,800 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/f9724ccdc7b044529644451643efdf49, entries=150, sequenceid=40, filesize=30.2 K 2024-12-17T12:39:17,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/9a662925d596433db10453a74156cabd as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9a662925d596433db10453a74156cabd 2024-12-17T12:39:17,804 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9a662925d596433db10453a74156cabd, entries=150, sequenceid=40, filesize=11.7 K 2024-12-17T12:39:17,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/cbd6805c8742495eabfd2164269ad127 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/cbd6805c8742495eabfd2164269ad127 2024-12-17T12:39:17,808 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/cbd6805c8742495eabfd2164269ad127, entries=150, sequenceid=40, filesize=11.7 K 2024-12-17T12:39:17,809 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a862a378d488b0f0d03cbf84efe058da in 1654ms, sequenceid=40, compaction requested=false 2024-12-17T12:39:17,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:17,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:17,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91 2024-12-17T12:39:17,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=91 2024-12-17T12:39:17,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-12-17T12:39:17,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.4820 sec 2024-12-17T12:39:17,812 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=90, table=TestAcidGuarantees in 3.4900 sec 2024-12-17T12:39:18,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-17T12:39:18,429 INFO [Thread-1513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 90 completed 2024-12-17T12:39:18,429 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:18,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=92, table=TestAcidGuarantees 2024-12-17T12:39:18,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-17T12:39:18,430 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=92, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=92, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:18,431 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=92, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=92, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:18,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:18,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-17T12:39:18,582 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=93 2024-12-17T12:39:18,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:18,582 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-17T12:39:18,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:18,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:18,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:18,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:18,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:18,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:18,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412170fa31a642f024d3396e63c2dc852b17e_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439156477/Put/seqid=0 2024-12-17T12:39:18,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742164_1340 (size=12154) 2024-12-17T12:39:18,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:18,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:18,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439218625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439218626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439218629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439218629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439218629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-17T12:39:18,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439218730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439218731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439218734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439218734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439218734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439218934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439218936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439218939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439218939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439218939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:18,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:18,995 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412170fa31a642f024d3396e63c2dc852b17e_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412170fa31a642f024d3396e63c2dc852b17e_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:18,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/cd283b40d9734f1bb8fbd80403049364, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:18,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/cd283b40d9734f1bb8fbd80403049364 is 175, key is test_row_0/A:col10/1734439156477/Put/seqid=0 2024-12-17T12:39:19,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742165_1341 (size=30955) 2024-12-17T12:39:19,001 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/cd283b40d9734f1bb8fbd80403049364 2024-12-17T12:39:19,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/ad86b6f6c46941708bb2fcd361e1825d is 50, key is test_row_0/B:col10/1734439156477/Put/seqid=0 2024-12-17T12:39:19,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742166_1342 (size=12001) 2024-12-17T12:39:19,013 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/ad86b6f6c46941708bb2fcd361e1825d 2024-12-17T12:39:19,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/e34da04e14d24e628da8656ca82427e7 is 50, key is test_row_0/C:col10/1734439156477/Put/seqid=0 2024-12-17T12:39:19,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742167_1343 (size=12001) 2024-12-17T12:39:19,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-17T12:39:19,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439219238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439219239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439219243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439219243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439219244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,425 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/e34da04e14d24e628da8656ca82427e7 2024-12-17T12:39:19,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/cd283b40d9734f1bb8fbd80403049364 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/cd283b40d9734f1bb8fbd80403049364 2024-12-17T12:39:19,465 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/cd283b40d9734f1bb8fbd80403049364, entries=150, sequenceid=53, filesize=30.2 K 2024-12-17T12:39:19,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/ad86b6f6c46941708bb2fcd361e1825d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/ad86b6f6c46941708bb2fcd361e1825d 2024-12-17T12:39:19,469 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/ad86b6f6c46941708bb2fcd361e1825d, entries=150, sequenceid=53, filesize=11.7 K 2024-12-17T12:39:19,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/e34da04e14d24e628da8656ca82427e7 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e34da04e14d24e628da8656ca82427e7 2024-12-17T12:39:19,472 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e34da04e14d24e628da8656ca82427e7, entries=150, sequenceid=53, filesize=11.7 K 2024-12-17T12:39:19,473 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a862a378d488b0f0d03cbf84efe058da in 891ms, sequenceid=53, compaction requested=true 2024-12-17T12:39:19,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:19,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:19,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=93 2024-12-17T12:39:19,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=93 2024-12-17T12:39:19,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-17T12:39:19,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0430 sec 2024-12-17T12:39:19,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=92, table=TestAcidGuarantees in 1.0460 sec 2024-12-17T12:39:19,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-17T12:39:19,533 INFO [Thread-1513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 92 completed 2024-12-17T12:39:19,534 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:19,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees 2024-12-17T12:39:19,534 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=94, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:19,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-17T12:39:19,535 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=94, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:19,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:19,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-17T12:39:19,686 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-17T12:39:19,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:19,686 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:39:19,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:19,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:19,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:19,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:19,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:19,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:19,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121778a542e1245549c6a0ab41da46fb92b9_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439158629/Put/seqid=0 2024-12-17T12:39:19,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742168_1344 (size=12154) 2024-12-17T12:39:19,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:19,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:19,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439219752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439219755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439219756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439219757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439219758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-17T12:39:19,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439219859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439219862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439219862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439219862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:19,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:19,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439219863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439220064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439220067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439220067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439220067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439220067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:20,098 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121778a542e1245549c6a0ab41da46fb92b9_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121778a542e1245549c6a0ab41da46fb92b9_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:20,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/9b74acb9aaea4384b1cfff27da7ddff1, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:20,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/9b74acb9aaea4384b1cfff27da7ddff1 is 175, key is test_row_0/A:col10/1734439158629/Put/seqid=0 2024-12-17T12:39:20,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742169_1345 (size=30955) 2024-12-17T12:39:20,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-17T12:39:20,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439220368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439220372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439220372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439220373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439220374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,502 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=76, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/9b74acb9aaea4384b1cfff27da7ddff1 2024-12-17T12:39:20,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/7b51d39a2f804e278fa495626d5a343d is 50, key is test_row_0/B:col10/1734439158629/Put/seqid=0 2024-12-17T12:39:20,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742170_1346 (size=12001) 2024-12-17T12:39:20,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-17T12:39:20,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439220871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439220877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439220879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439220880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:20,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439220881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:20,911 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/7b51d39a2f804e278fa495626d5a343d 2024-12-17T12:39:20,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/a83614a577704ecda9a7f5bead4cec7f is 50, key is test_row_0/C:col10/1734439158629/Put/seqid=0 2024-12-17T12:39:20,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742171_1347 (size=12001) 2024-12-17T12:39:20,922 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/a83614a577704ecda9a7f5bead4cec7f 2024-12-17T12:39:20,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/9b74acb9aaea4384b1cfff27da7ddff1 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9b74acb9aaea4384b1cfff27da7ddff1 2024-12-17T12:39:20,928 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9b74acb9aaea4384b1cfff27da7ddff1, entries=150, sequenceid=76, filesize=30.2 K 2024-12-17T12:39:20,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/7b51d39a2f804e278fa495626d5a343d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/7b51d39a2f804e278fa495626d5a343d 2024-12-17T12:39:20,931 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/7b51d39a2f804e278fa495626d5a343d, entries=150, sequenceid=76, filesize=11.7 K 2024-12-17T12:39:20,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/a83614a577704ecda9a7f5bead4cec7f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/a83614a577704ecda9a7f5bead4cec7f 2024-12-17T12:39:20,935 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/a83614a577704ecda9a7f5bead4cec7f, entries=150, sequenceid=76, filesize=11.7 K 2024-12-17T12:39:20,936 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a862a378d488b0f0d03cbf84efe058da in 1250ms, sequenceid=76, compaction requested=true 2024-12-17T12:39:20,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:20,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:20,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=95 2024-12-17T12:39:20,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=95 2024-12-17T12:39:20,937 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-17T12:39:20,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4020 sec 2024-12-17T12:39:20,938 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees in 1.4040 sec 2024-12-17T12:39:21,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-17T12:39:21,638 INFO [Thread-1513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-12-17T12:39:21,639 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees 2024-12-17T12:39:21,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-17T12:39:21,640 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:21,640 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:21,641 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:21,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-17T12:39:21,792 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:21,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-17T12:39:21,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:21,792 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-17T12:39:21,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:21,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:21,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:21,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:21,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:21,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:21,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412174c758af4c37d4a12bcf08767a79c8bab_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439159749/Put/seqid=0 2024-12-17T12:39:21,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742172_1348 (size=12154) 2024-12-17T12:39:21,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:21,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:21,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:21,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439221932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:21,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:21,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439221932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:21,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:21,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439221932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:21,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:21,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439221932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:21,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:21,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439221932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:21,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-17T12:39:22,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439222039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439222039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439222039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439222039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439222039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:22,205 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412174c758af4c37d4a12bcf08767a79c8bab_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412174c758af4c37d4a12bcf08767a79c8bab_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:22,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/b3889581d61c4998a49f72246bf6029c, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:22,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/b3889581d61c4998a49f72246bf6029c is 175, key is test_row_0/A:col10/1734439159749/Put/seqid=0 2024-12-17T12:39:22,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742173_1349 (size=30955) 2024-12-17T12:39:22,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-17T12:39:22,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439222244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439222245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439222245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439222245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439222246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439222547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439222548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439222549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439222549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:22,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439222550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:22,609 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=89, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/b3889581d61c4998a49f72246bf6029c 2024-12-17T12:39:22,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/aa9d3bc965e24dceae1f6b786670c8da is 50, key is test_row_0/B:col10/1734439159749/Put/seqid=0 2024-12-17T12:39:22,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742174_1350 (size=12001) 2024-12-17T12:39:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-17T12:39:23,018 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/aa9d3bc965e24dceae1f6b786670c8da 2024-12-17T12:39:23,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/b338e807acfc45e2a93466bc5a1de6dc is 50, key is test_row_0/C:col10/1734439159749/Put/seqid=0 2024-12-17T12:39:23,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742175_1351 (size=12001) 2024-12-17T12:39:23,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:23,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439223053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:23,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:23,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439223053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:23,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:23,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439223054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:23,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:23,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439223055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:23,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:23,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439223058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:23,427 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/b338e807acfc45e2a93466bc5a1de6dc 2024-12-17T12:39:23,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/b3889581d61c4998a49f72246bf6029c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/b3889581d61c4998a49f72246bf6029c 2024-12-17T12:39:23,433 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/b3889581d61c4998a49f72246bf6029c, entries=150, sequenceid=89, filesize=30.2 K 2024-12-17T12:39:23,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/aa9d3bc965e24dceae1f6b786670c8da as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/aa9d3bc965e24dceae1f6b786670c8da 2024-12-17T12:39:23,438 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/aa9d3bc965e24dceae1f6b786670c8da, entries=150, sequenceid=89, filesize=11.7 K 2024-12-17T12:39:23,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/b338e807acfc45e2a93466bc5a1de6dc as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/b338e807acfc45e2a93466bc5a1de6dc 2024-12-17T12:39:23,441 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/b338e807acfc45e2a93466bc5a1de6dc, entries=150, sequenceid=89, filesize=11.7 K 2024-12-17T12:39:23,442 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a862a378d488b0f0d03cbf84efe058da in 1650ms, sequenceid=89, compaction requested=true 2024-12-17T12:39:23,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:23,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:23,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=97 2024-12-17T12:39:23,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=97 2024-12-17T12:39:23,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-17T12:39:23,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8030 sec 2024-12-17T12:39:23,444 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees in 1.8050 sec 2024-12-17T12:39:23,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-17T12:39:23,744 INFO [Thread-1513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-12-17T12:39:23,745 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:23,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees 2024-12-17T12:39:23,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-17T12:39:23,746 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:23,746 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:23,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:23,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-17T12:39:23,897 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:23,898 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-17T12:39:23,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:23,898 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:39:23,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:23,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:23,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:23,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:23,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:23,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:23,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217444ad12b227b4e3c912f9366d1d5ae2d_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439161931/Put/seqid=0 2024-12-17T12:39:23,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742176_1352 (size=12154) 
2024-12-17T12:39:24,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-17T12:39:24,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:24,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:24,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439224072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439224073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439224075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439224076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439224078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439224179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439224179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439224179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439224182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439224182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:24,311 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217444ad12b227b4e3c912f9366d1d5ae2d_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217444ad12b227b4e3c912f9366d1d5ae2d_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:24,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/2ac4d0d833fd4560a6253b8044716d95, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:24,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/2ac4d0d833fd4560a6253b8044716d95 is 175, key is test_row_0/A:col10/1734439161931/Put/seqid=0 2024-12-17T12:39:24,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742177_1353 (size=30955) 2024-12-17T12:39:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-17T12:39:24,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439224383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439224383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439224383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439224383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439224385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439224685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439224685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439224686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439224687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:24,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439224691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:24,716 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=112, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/2ac4d0d833fd4560a6253b8044716d95 2024-12-17T12:39:24,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/fcdbc25e28594b8aabbc73da47b1c734 is 50, key is test_row_0/B:col10/1734439161931/Put/seqid=0 2024-12-17T12:39:24,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742178_1354 (size=12001) 2024-12-17T12:39:24,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-17T12:39:25,126 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/fcdbc25e28594b8aabbc73da47b1c734 2024-12-17T12:39:25,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/c6aa40e5d56741e78666644da55d6ad6 is 50, key is test_row_0/C:col10/1734439161931/Put/seqid=0 2024-12-17T12:39:25,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742179_1355 (size=12001) 2024-12-17T12:39:25,135 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/c6aa40e5d56741e78666644da55d6ad6 2024-12-17T12:39:25,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/2ac4d0d833fd4560a6253b8044716d95 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/2ac4d0d833fd4560a6253b8044716d95 2024-12-17T12:39:25,141 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/2ac4d0d833fd4560a6253b8044716d95, entries=150, sequenceid=112, filesize=30.2 K 2024-12-17T12:39:25,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/fcdbc25e28594b8aabbc73da47b1c734 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/fcdbc25e28594b8aabbc73da47b1c734 2024-12-17T12:39:25,147 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/fcdbc25e28594b8aabbc73da47b1c734, entries=150, sequenceid=112, filesize=11.7 K 2024-12-17T12:39:25,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/c6aa40e5d56741e78666644da55d6ad6 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c6aa40e5d56741e78666644da55d6ad6 2024-12-17T12:39:25,152 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c6aa40e5d56741e78666644da55d6ad6, entries=150, sequenceid=112, filesize=11.7 K 2024-12-17T12:39:25,153 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a862a378d488b0f0d03cbf84efe058da in 1255ms, sequenceid=112, compaction requested=true 2024-12-17T12:39:25,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:25,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:25,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=99 2024-12-17T12:39:25,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=99 2024-12-17T12:39:25,156 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-17T12:39:25,156 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4080 sec 2024-12-17T12:39:25,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees in 1.4110 sec 2024-12-17T12:39:25,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:25,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-17T12:39:25,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:25,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:25,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:25,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:25,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:25,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:25,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412175a0b91d76a6444228b8739db7f60a46b_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439164072/Put/seqid=0 
2024-12-17T12:39:25,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742180_1356 (size=17034) 2024-12-17T12:39:25,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439225225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439225228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439225228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439225229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439225229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439225336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439225336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439225336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439225337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439225337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439225541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439225542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439225542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439225542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439225542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,620 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:25,623 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412175a0b91d76a6444228b8739db7f60a46b_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412175a0b91d76a6444228b8739db7f60a46b_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:25,624 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/5fcf78e2573c4fce87ee4ebaa2fe6755, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:25,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/5fcf78e2573c4fce87ee4ebaa2fe6755 is 175, key is test_row_0/A:col10/1734439164072/Put/seqid=0 2024-12-17T12:39:25,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742181_1357 (size=48139) 2024-12-17T12:39:25,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 
2024-12-17T12:39:25,849 INFO [Thread-1513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-12-17T12:39:25,850 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:25,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-12-17T12:39:25,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-17T12:39:25,851 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:25,851 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:25,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:25,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439225847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439225848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439225849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439225849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:25,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439225850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:25,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-17T12:39:26,002 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:26,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-17T12:39:26,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:26,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:26,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:26,003 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,028 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=127, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/5fcf78e2573c4fce87ee4ebaa2fe6755 2024-12-17T12:39:26,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/30c8697974de469d907bed9bb57219c6 is 50, key is test_row_0/B:col10/1734439164072/Put/seqid=0 2024-12-17T12:39:26,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742182_1358 (size=12001) 2024-12-17T12:39:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-17T12:39:26,155 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:26,155 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-17T12:39:26,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:26,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:26,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:26,155 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,307 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:26,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-17T12:39:26,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:26,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:26,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:26,308 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:26,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439226356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:26,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:26,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439226356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:26,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:26,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439226356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:26,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:26,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439226357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:26,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:26,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439226357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:26,438 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/30c8697974de469d907bed9bb57219c6 2024-12-17T12:39:26,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/0f7333d9ae9844c0bbd27957b2ed192a is 50, key is test_row_0/C:col10/1734439164072/Put/seqid=0 2024-12-17T12:39:26,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-17T12:39:26,459 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:26,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742183_1359 (size=12001) 2024-12-17T12:39:26,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-17T12:39:26,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:26,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:26,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:26,460 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:26,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/0f7333d9ae9844c0bbd27957b2ed192a 2024-12-17T12:39:26,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/5fcf78e2573c4fce87ee4ebaa2fe6755 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/5fcf78e2573c4fce87ee4ebaa2fe6755 2024-12-17T12:39:26,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/5fcf78e2573c4fce87ee4ebaa2fe6755, entries=250, sequenceid=127, filesize=47.0 K 2024-12-17T12:39:26,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/30c8697974de469d907bed9bb57219c6 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/30c8697974de469d907bed9bb57219c6 2024-12-17T12:39:26,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/30c8697974de469d907bed9bb57219c6, entries=150, 
sequenceid=127, filesize=11.7 K 2024-12-17T12:39:26,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/0f7333d9ae9844c0bbd27957b2ed192a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/0f7333d9ae9844c0bbd27957b2ed192a 2024-12-17T12:39:26,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/0f7333d9ae9844c0bbd27957b2ed192a, entries=150, sequenceid=127, filesize=11.7 K 2024-12-17T12:39:26,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for a862a378d488b0f0d03cbf84efe058da in 1282ms, sequenceid=127, compaction requested=true 2024-12-17T12:39:26,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:26,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a862a378d488b0f0d03cbf84efe058da:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:39:26,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:26,480 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-17T12:39:26,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a862a378d488b0f0d03cbf84efe058da:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:39:26,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:26,480 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-17T12:39:26,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a862a378d488b0f0d03cbf84efe058da:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:39:26,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:26,481 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 84007 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-17T12:39:26,481 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): a862a378d488b0f0d03cbf84efe058da/B is initiating minor compaction (all files) 2024-12-17T12:39:26,481 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a862a378d488b0f0d03cbf84efe058da/B in 
TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:26,481 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9c57b6e5d6004733a9f98cf898df0715, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9a662925d596433db10453a74156cabd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/ad86b6f6c46941708bb2fcd361e1825d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/7b51d39a2f804e278fa495626d5a343d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/aa9d3bc965e24dceae1f6b786670c8da, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/fcdbc25e28594b8aabbc73da47b1c734, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/30c8697974de469d907bed9bb57219c6] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp, totalSize=82.0 K 2024-12-17T12:39:26,482 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 233869 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-17T12:39:26,482 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c57b6e5d6004733a9f98cf898df0715, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734439154330 2024-12-17T12:39:26,482 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): a862a378d488b0f0d03cbf84efe058da/A is initiating minor compaction (all files) 2024-12-17T12:39:26,482 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a862a378d488b0f0d03cbf84efe058da/A in TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:26,482 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/79ab10f2f5f941d89bbec4163ca59353, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/f9724ccdc7b044529644451643efdf49, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/cd283b40d9734f1bb8fbd80403049364, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9b74acb9aaea4384b1cfff27da7ddff1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/b3889581d61c4998a49f72246bf6029c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/2ac4d0d833fd4560a6253b8044716d95, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/5fcf78e2573c4fce87ee4ebaa2fe6755] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp, totalSize=228.4 K 2024-12-17T12:39:26,482 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=9 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:26,482 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/79ab10f2f5f941d89bbec4163ca59353, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/f9724ccdc7b044529644451643efdf49, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/cd283b40d9734f1bb8fbd80403049364, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9b74acb9aaea4384b1cfff27da7ddff1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/b3889581d61c4998a49f72246bf6029c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/2ac4d0d833fd4560a6253b8044716d95, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/5fcf78e2573c4fce87ee4ebaa2fe6755] 2024-12-17T12:39:26,483 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a662925d596433db10453a74156cabd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734439154341 2024-12-17T12:39:26,483 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting ad86b6f6c46941708bb2fcd361e1825d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734439156473 2024-12-17T12:39:26,483 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79ab10f2f5f941d89bbec4163ca59353, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734439154330 2024-12-17T12:39:26,483 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b51d39a2f804e278fa495626d5a343d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734439158625 2024-12-17T12:39:26,483 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9724ccdc7b044529644451643efdf49, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734439154341 2024-12-17T12:39:26,484 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd283b40d9734f1bb8fbd80403049364, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734439156473 2024-12-17T12:39:26,484 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting aa9d3bc965e24dceae1f6b786670c8da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734439159749 2024-12-17T12:39:26,484 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting fcdbc25e28594b8aabbc73da47b1c734, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1734439161905 2024-12-17T12:39:26,484 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
9b74acb9aaea4384b1cfff27da7ddff1, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734439158625 2024-12-17T12:39:26,484 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3889581d61c4998a49f72246bf6029c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734439159749 2024-12-17T12:39:26,484 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 30c8697974de469d907bed9bb57219c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439164072 2024-12-17T12:39:26,484 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ac4d0d833fd4560a6253b8044716d95, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1734439161905 2024-12-17T12:39:26,484 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fcf78e2573c4fce87ee4ebaa2fe6755, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439164072 2024-12-17T12:39:26,492 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:26,493 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241217f976e2eaea92402f9a5f938c141421b4_a862a378d488b0f0d03cbf84efe058da store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:26,494 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a862a378d488b0f0d03cbf84efe058da#B#compaction#300 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:26,495 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/45dd81309808441eaf17f7d0e3a4c8d5 is 50, key is test_row_0/B:col10/1734439164072/Put/seqid=0 2024-12-17T12:39:26,497 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241217f976e2eaea92402f9a5f938c141421b4_a862a378d488b0f0d03cbf84efe058da, store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:26,497 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217f976e2eaea92402f9a5f938c141421b4_a862a378d488b0f0d03cbf84efe058da because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:26,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742184_1360 (size=12241) 2024-12-17T12:39:26,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742185_1361 (size=4469) 2024-12-17T12:39:26,612 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:26,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-17T12:39:26,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:26,613 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-17T12:39:26,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:26,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:26,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:26,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:26,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:26,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:26,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412175191f8459fff42c4b1964602cbc7678b_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439165224/Put/seqid=0 2024-12-17T12:39:26,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742186_1362 (size=12304) 2024-12-17T12:39:26,901 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a862a378d488b0f0d03cbf84efe058da#A#compaction#299 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:26,902 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/15bec26e3bb347c49460f7c11db37eb4 is 175, key is test_row_0/A:col10/1734439164072/Put/seqid=0 2024-12-17T12:39:26,904 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/45dd81309808441eaf17f7d0e3a4c8d5 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/45dd81309808441eaf17f7d0e3a4c8d5 2024-12-17T12:39:26,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742187_1363 (size=31195) 2024-12-17T12:39:26,908 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in a862a378d488b0f0d03cbf84efe058da/B of a862a378d488b0f0d03cbf84efe058da into 45dd81309808441eaf17f7d0e3a4c8d5(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:26,908 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:26,908 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da., storeName=a862a378d488b0f0d03cbf84efe058da/B, priority=9, startTime=1734439166480; duration=0sec 2024-12-17T12:39:26,908 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:26,908 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a862a378d488b0f0d03cbf84efe058da:B 2024-12-17T12:39:26,908 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-17T12:39:26,911 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 84007 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-17T12:39:26,911 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): a862a378d488b0f0d03cbf84efe058da/C is initiating minor compaction (all files) 2024-12-17T12:39:26,911 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a862a378d488b0f0d03cbf84efe058da/C in TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:26,911 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/79596c79a17f4257805d167097862948, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/cbd6805c8742495eabfd2164269ad127, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e34da04e14d24e628da8656ca82427e7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/a83614a577704ecda9a7f5bead4cec7f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/b338e807acfc45e2a93466bc5a1de6dc, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c6aa40e5d56741e78666644da55d6ad6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/0f7333d9ae9844c0bbd27957b2ed192a] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp, totalSize=82.0 K 2024-12-17T12:39:26,911 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 79596c79a17f4257805d167097862948, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734439154330 2024-12-17T12:39:26,912 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting cbd6805c8742495eabfd2164269ad127, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734439154341 2024-12-17T12:39:26,912 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting e34da04e14d24e628da8656ca82427e7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734439156473 2024-12-17T12:39:26,912 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting a83614a577704ecda9a7f5bead4cec7f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734439158625 2024-12-17T12:39:26,912 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting b338e807acfc45e2a93466bc5a1de6dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734439159749 2024-12-17T12:39:26,913 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting c6aa40e5d56741e78666644da55d6ad6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1734439161905 2024-12-17T12:39:26,913 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f7333d9ae9844c0bbd27957b2ed192a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439164072 2024-12-17T12:39:26,923 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): a862a378d488b0f0d03cbf84efe058da#C#compaction#302 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:26,923 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/85e618f25428426ba147c59ec6072c2d is 50, key is test_row_0/C:col10/1734439164072/Put/seqid=0 2024-12-17T12:39:26,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742188_1364 (size=12241) 2024-12-17T12:39:26,930 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/85e618f25428426ba147c59ec6072c2d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/85e618f25428426ba147c59ec6072c2d 2024-12-17T12:39:26,934 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in a862a378d488b0f0d03cbf84efe058da/C of a862a378d488b0f0d03cbf84efe058da into 85e618f25428426ba147c59ec6072c2d(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:26,934 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:26,934 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da., storeName=a862a378d488b0f0d03cbf84efe058da/C, priority=9, startTime=1734439166480; duration=0sec 2024-12-17T12:39:26,934 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:26,934 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a862a378d488b0f0d03cbf84efe058da:C 2024-12-17T12:39:26,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-17T12:39:27,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:27,025 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412175191f8459fff42c4b1964602cbc7678b_a862a378d488b0f0d03cbf84efe058da to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412175191f8459fff42c4b1964602cbc7678b_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:27,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/bdc8034333a14be88514c254c5e88925, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:27,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/bdc8034333a14be88514c254c5e88925 is 175, key is test_row_0/A:col10/1734439165224/Put/seqid=0 2024-12-17T12:39:27,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742189_1365 (size=31105) 2024-12-17T12:39:27,309 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/15bec26e3bb347c49460f7c11db37eb4 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/15bec26e3bb347c49460f7c11db37eb4 2024-12-17T12:39:27,313 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in a862a378d488b0f0d03cbf84efe058da/A of a862a378d488b0f0d03cbf84efe058da into 15bec26e3bb347c49460f7c11db37eb4(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:27,313 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:27,313 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da., storeName=a862a378d488b0f0d03cbf84efe058da/A, priority=9, startTime=1734439166480; duration=0sec 2024-12-17T12:39:27,313 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:27,313 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a862a378d488b0f0d03cbf84efe058da:A 2024-12-17T12:39:27,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:27,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
as already flushing 2024-12-17T12:39:27,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439227375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439227376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439227376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439227377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439227377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,432 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=148, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/bdc8034333a14be88514c254c5e88925 2024-12-17T12:39:27,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/d7f92cabced74b46af4ef56322996c66 is 50, key is test_row_0/B:col10/1734439165224/Put/seqid=0 2024-12-17T12:39:27,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742190_1366 (size=12151) 2024-12-17T12:39:27,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439227481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439227481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439227481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439227482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439227482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439227686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439227687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439227687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439227687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439227688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,842 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=148 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/d7f92cabced74b46af4ef56322996c66 2024-12-17T12:39:27,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/15facc63f3774e69bbde6a67c2dd59b2 is 50, key is test_row_0/C:col10/1734439165224/Put/seqid=0 2024-12-17T12:39:27,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742191_1367 (size=12151) 2024-12-17T12:39:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-17T12:39:27,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439227991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439227991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439227991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439227991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:27,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:27,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439227992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,252 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=148 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/15facc63f3774e69bbde6a67c2dd59b2 2024-12-17T12:39:28,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/bdc8034333a14be88514c254c5e88925 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/bdc8034333a14be88514c254c5e88925 2024-12-17T12:39:28,258 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/bdc8034333a14be88514c254c5e88925, entries=150, sequenceid=148, filesize=30.4 K 2024-12-17T12:39:28,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/d7f92cabced74b46af4ef56322996c66 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/d7f92cabced74b46af4ef56322996c66 2024-12-17T12:39:28,262 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/d7f92cabced74b46af4ef56322996c66, entries=150, sequenceid=148, filesize=11.9 K 2024-12-17T12:39:28,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/15facc63f3774e69bbde6a67c2dd59b2 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/15facc63f3774e69bbde6a67c2dd59b2 2024-12-17T12:39:28,265 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/15facc63f3774e69bbde6a67c2dd59b2, entries=150, sequenceid=148, filesize=11.9 K 2024-12-17T12:39:28,266 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for a862a378d488b0f0d03cbf84efe058da in 1653ms, sequenceid=148, compaction requested=false 2024-12-17T12:39:28,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:28,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:28,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-12-17T12:39:28,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-12-17T12:39:28,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-17T12:39:28,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4160 sec 2024-12-17T12:39:28,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 2.4180 sec 2024-12-17T12:39:28,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:28,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-17T12:39:28,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:28,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:28,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:28,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:28,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:28,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:28,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121761b14b755fc24bebb92eb3f8be591633_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439168498/Put/seqid=0 2024-12-17T12:39:28,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742192_1368 (size=17284) 2024-12-17T12:39:28,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439228521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439228525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439228526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439228526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439228526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439228626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439228630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439228634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439228634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439228634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439228834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439228834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439228841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439228841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439228842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:28,915 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:28,918 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121761b14b755fc24bebb92eb3f8be591633_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121761b14b755fc24bebb92eb3f8be591633_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:28,918 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/8ee63805475745f1a9525ca90f980c40, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:28,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/8ee63805475745f1a9525ca90f980c40 is 175, key is test_row_0/A:col10/1734439168498/Put/seqid=0 2024-12-17T12:39:28,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742193_1369 (size=48389) 2024-12-17T12:39:29,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:29,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439229138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:29,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:29,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439229139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:29,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:29,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439229144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:29,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:29,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439229145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:29,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:29,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439229146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:29,325 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=168, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/8ee63805475745f1a9525ca90f980c40 2024-12-17T12:39:29,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/63a1865c0f9445eab6481cc459f390da is 50, key is test_row_0/B:col10/1734439168498/Put/seqid=0 2024-12-17T12:39:29,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742194_1370 (size=12151) 2024-12-17T12:39:29,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:29,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439229642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:29,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:29,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439229647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:29,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:29,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439229648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:29,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:29,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439229651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:29,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:29,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439229651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:29,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/63a1865c0f9445eab6481cc459f390da 2024-12-17T12:39:29,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/c2c50ac842fe44f1af1e1e9bbcc6372b is 50, key is test_row_0/C:col10/1734439168498/Put/seqid=0 2024-12-17T12:39:29,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742195_1371 (size=12151) 2024-12-17T12:39:29,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-17T12:39:29,955 INFO [Thread-1513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-12-17T12:39:29,956 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:29,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-12-17T12:39:29,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-17T12:39:29,957 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:29,957 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=102, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:29,957 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:30,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-17T12:39:30,108 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-17T12:39:30,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:30,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:30,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:30,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:30,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:30,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
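The run of RegionTooBusyException entries above is the region server's memstore back-pressure: once the region's memstore passes its blocking limit (512.0 K in this test) while a flush is still in flight, HRegion.checkResources rejects further Mutate calls until the flush catches up. Below is a minimal, self-contained sketch of that kind of guard; the class, field, and method names are illustrative assumptions, not the actual org.apache.hadoop.hbase.regionserver.HRegion source.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

final class MemstoreGuard {

    /** Stand-in for org.apache.hadoop.hbase.RegionTooBusyException (illustrative only). */
    static final class RegionTooBusyException extends IOException {
        RegionTooBusyException(String msg) { super(msg); }
    }

    private final long blockingLimitBytes;            // 512 * 1024 in this test run
    private final AtomicLong memstoreBytes = new AtomicLong();

    MemstoreGuard(long blockingLimitBytes) {
        this.blockingLimitBytes = blockingLimitBytes;
    }

    /** Called before each mutation; mirrors the "Over memstore limit" rejection in the log. */
    void checkResources(String regionName) throws RegionTooBusyException {
        if (memstoreBytes.get() > blockingLimitBytes) {
            // A background flush is already queued; the caller is expected to retry later.
            throw new RegionTooBusyException(
                "Over memstore limit=" + blockingLimitBytes + ", regionName=" + regionName);
        }
    }

    /** Writes grow the accounted size; a completed flush shrinks it again. */
    void accountWrite(long cellBytes)    { memstoreBytes.addAndGet(cellBytes); }
    void accountFlush(long flushedBytes) { memstoreBytes.addAndGet(-flushedBytes); }
}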
2024-12-17T12:39:30,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/c2c50ac842fe44f1af1e1e9bbcc6372b 2024-12-17T12:39:30,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/8ee63805475745f1a9525ca90f980c40 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8ee63805475745f1a9525ca90f980c40 2024-12-17T12:39:30,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8ee63805475745f1a9525ca90f980c40, entries=250, sequenceid=168, filesize=47.3 K 2024-12-17T12:39:30,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/63a1865c0f9445eab6481cc459f390da as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/63a1865c0f9445eab6481cc459f390da 2024-12-17T12:39:30,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/63a1865c0f9445eab6481cc459f390da, entries=150, sequenceid=168, filesize=11.9 K 2024-12-17T12:39:30,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/c2c50ac842fe44f1af1e1e9bbcc6372b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c2c50ac842fe44f1af1e1e9bbcc6372b 2024-12-17T12:39:30,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c2c50ac842fe44f1af1e1e9bbcc6372b, entries=150, sequenceid=168, filesize=11.9 K 2024-12-17T12:39:30,156 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=120.76 KB/123660 for a862a378d488b0f0d03cbf84efe058da in 1658ms, sequenceid=168, compaction requested=true 2024-12-17T12:39:30,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:30,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a862a378d488b0f0d03cbf84efe058da:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:39:30,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:30,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a862a378d488b0f0d03cbf84efe058da:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:39:30,156 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:30,156 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:30,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:30,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a862a378d488b0f0d03cbf84efe058da:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:39:30,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:30,157 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110689 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:30,157 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:30,157 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): a862a378d488b0f0d03cbf84efe058da/A is initiating minor compaction (all files) 2024-12-17T12:39:30,157 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): a862a378d488b0f0d03cbf84efe058da/B is initiating minor compaction (all files) 2024-12-17T12:39:30,157 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a862a378d488b0f0d03cbf84efe058da/B in TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:30,157 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a862a378d488b0f0d03cbf84efe058da/A in TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
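The 512.0 K blocking limit and the frequent flushes above follow from the memstore sizing knobs: the blocking threshold is the per-region flush size times the block multiplier, and test clusters usually shrink the flush size so this path triggers quickly. A hedged configuration sketch consistent with the limit seen in this log; the concrete values are inferred assumptions, not read from the test's actual setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class SmallMemstoreConfig {
    /** Builds a configuration whose write-blocking limit works out to 512 KB. */
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region memstore once it reaches 128 KB (assumed test-sized value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Block writes once the memstore reaches 4x the flush size:
        // 4 * 128 KB = 512 KB, matching "Over memstore limit=512.0 K" above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}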
2024-12-17T12:39:30,158 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/15bec26e3bb347c49460f7c11db37eb4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/bdc8034333a14be88514c254c5e88925, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8ee63805475745f1a9525ca90f980c40] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp, totalSize=108.1 K 2024-12-17T12:39:30,158 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/45dd81309808441eaf17f7d0e3a4c8d5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/d7f92cabced74b46af4ef56322996c66, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/63a1865c0f9445eab6481cc459f390da] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp, totalSize=35.7 K 2024-12-17T12:39:30,158 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:30,158 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/15bec26e3bb347c49460f7c11db37eb4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/bdc8034333a14be88514c254c5e88925, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8ee63805475745f1a9525ca90f980c40] 2024-12-17T12:39:30,158 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 45dd81309808441eaf17f7d0e3a4c8d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439164072 2024-12-17T12:39:30,158 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15bec26e3bb347c49460f7c11db37eb4, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439164072 2024-12-17T12:39:30,158 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d7f92cabced74b46af4ef56322996c66, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=148, earliestPutTs=1734439165224 2024-12-17T12:39:30,158 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdc8034333a14be88514c254c5e88925, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=148, earliestPutTs=1734439165224 2024-12-17T12:39:30,158 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 63a1865c0f9445eab6481cc459f390da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439167375 2024-12-17T12:39:30,158 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ee63805475745f1a9525ca90f980c40, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439167375 2024-12-17T12:39:30,164 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:30,165 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a862a378d488b0f0d03cbf84efe058da#B#compaction#309 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:30,166 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/24a2975bdcb244338ecf920253e21c0b is 50, key is test_row_0/B:col10/1734439168498/Put/seqid=0 2024-12-17T12:39:30,166 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121701aab3f5906f46c0986391aeb2b3afa2_a862a378d488b0f0d03cbf84efe058da store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:30,168 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121701aab3f5906f46c0986391aeb2b3afa2_a862a378d488b0f0d03cbf84efe058da, store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:30,168 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121701aab3f5906f46c0986391aeb2b3afa2_a862a378d488b0f0d03cbf84efe058da because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:30,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742197_1373 (size=4469) 2024-12-17T12:39:30,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742196_1372 (size=12493) 2024-12-17T12:39:30,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-17T12:39:30,261 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-17T12:39:30,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
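[Editor's note] The pid=103 entries above show the master dispatching a flush procedure (FlushRegionCallable / RS_FLUSH_REGIONS) to the region server hosting TestAcidGuarantees. As a minimal client-side sketch of how such a flush is requested, assuming a reachable cluster and the standard HBase client with hbase-site.xml on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: ask the master to flush all regions of a table.
public class FlushExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Server side, this becomes a flush procedure dispatched to each
            // region server hosting a region of the table, as in the log
            // (RS_FLUSH_REGIONS / FlushRegionCallable, pid=103).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}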
2024-12-17T12:39:30,261 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-17T12:39:30,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:30,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:30,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:30,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:30,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:30,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217ecd73cc057db4e838c95d44450a34d58_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439168524/Put/seqid=0 2024-12-17T12:39:30,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742198_1374 (size=12304) 2024-12-17T12:39:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-17T12:39:30,574 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a862a378d488b0f0d03cbf84efe058da#A#compaction#308 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:30,574 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/06c88512004f4916929a5d890edf0d42 is 175, key is test_row_0/A:col10/1734439168498/Put/seqid=0 2024-12-17T12:39:30,578 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/24a2975bdcb244338ecf920253e21c0b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/24a2975bdcb244338ecf920253e21c0b 2024-12-17T12:39:30,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742199_1375 (size=31447) 2024-12-17T12:39:30,582 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a862a378d488b0f0d03cbf84efe058da/B of a862a378d488b0f0d03cbf84efe058da into 24a2975bdcb244338ecf920253e21c0b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:30,582 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:30,582 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da., storeName=a862a378d488b0f0d03cbf84efe058da/B, priority=13, startTime=1734439170156; duration=0sec 2024-12-17T12:39:30,582 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:30,582 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a862a378d488b0f0d03cbf84efe058da:B 2024-12-17T12:39:30,582 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:30,583 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:30,583 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): a862a378d488b0f0d03cbf84efe058da/C is initiating minor compaction (all files) 2024-12-17T12:39:30,583 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a862a378d488b0f0d03cbf84efe058da/C in TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
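[Editor's note] The PressureAwareThroughputController lines above report an aggregate compaction throughput cap of 53.85 MB/second shared by the active compactions. As a loose conceptual sketch only, not the controller's real code, capping write throughput amounts to sleeping whenever the bytes written so far run ahead of the allowed rate; the 53.85 MB/s figure is simply taken from the log line.

// Conceptual rate-limited-writer sketch, in the spirit of a compaction
// throughput controller. Not the actual HBase implementation.
public class ThroughputLimitSketch {
    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;

    ThroughputLimitSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    // Call after writing a chunk; sleeps if we are ahead of the allowed rate.
    void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double earliestAllowedSec = bytesWritten / maxBytesPerSecond;
        if (earliestAllowedSec > elapsedSec) {
            Thread.sleep((long) ((earliestAllowedSec - elapsedSec) * 1000));
        }
    }

    public static void main(String[] args) throws Exception {
        ThroughputLimitSketch limiter = new ThroughputLimitSketch(53.85 * 1024 * 1024);
        for (int i = 0; i < 10; i++) {
            // ... write a 1 MiB block of compacted output here ...
            limiter.control(1024 * 1024);
        }
        System.out.println("done");
    }
}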
2024-12-17T12:39:30,583 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/85e618f25428426ba147c59ec6072c2d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/15facc63f3774e69bbde6a67c2dd59b2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c2c50ac842fe44f1af1e1e9bbcc6372b] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp, totalSize=35.7 K 2024-12-17T12:39:30,584 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 85e618f25428426ba147c59ec6072c2d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439164072 2024-12-17T12:39:30,584 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 15facc63f3774e69bbde6a67c2dd59b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=148, earliestPutTs=1734439165224 2024-12-17T12:39:30,584 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting c2c50ac842fe44f1af1e1e9bbcc6372b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439167375 2024-12-17T12:39:30,588 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a862a378d488b0f0d03cbf84efe058da#C#compaction#311 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:30,588 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/2787499212ef4f50a67ac6002f511476 is 50, key is test_row_0/C:col10/1734439168498/Put/seqid=0 2024-12-17T12:39:30,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742200_1376 (size=12493) 2024-12-17T12:39:30,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:30,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:30,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439230663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439230664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439230666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:30,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439230667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,675 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217ecd73cc057db4e838c95d44450a34d58_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217ecd73cc057db4e838c95d44450a34d58_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:30,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/25850d8486de4cbf8f8496692e61b05c, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:30,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/25850d8486de4cbf8f8496692e61b05c is 175, key is test_row_0/A:col10/1734439168524/Put/seqid=0 2024-12-17T12:39:30,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439230668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742201_1377 (size=31105) 2024-12-17T12:39:30,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439230768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439230769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439230770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439230775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439230777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439230974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439230977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439230977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,985 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/06c88512004f4916929a5d890edf0d42 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/06c88512004f4916929a5d890edf0d42 2024-12-17T12:39:30,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439230983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:30,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439230984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:30,989 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a862a378d488b0f0d03cbf84efe058da/A of a862a378d488b0f0d03cbf84efe058da into 06c88512004f4916929a5d890edf0d42(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
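[Editor's note] The repeated RegionTooBusyException / "Over memstore limit=512.0 K" entries show the region server rejecting client writes while the pid=103 flush drains the memstore. The standard HBase client retries this exception internally with backoff; purely as an illustration of what those retries amount to (and with the column family "A", row key, qualifier, and value invented for the example, not taken from the test's own writer), an explicit retry loop would look roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of explicit backoff on RegionTooBusyException. This simplifies the
// real client behavior, which wraps retries itself; it only illustrates what
// is happening behind the repeated WARN/DEBUG entries above.
public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException busy) {
                    // Memstore above the blocking limit; wait for the flush to drain it.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}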
2024-12-17T12:39:30,989 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:30,989 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da., storeName=a862a378d488b0f0d03cbf84efe058da/A, priority=13, startTime=1734439170156; duration=0sec 2024-12-17T12:39:30,989 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:30,989 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a862a378d488b0f0d03cbf84efe058da:A 2024-12-17T12:39:30,995 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/2787499212ef4f50a67ac6002f511476 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/2787499212ef4f50a67ac6002f511476 2024-12-17T12:39:30,998 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a862a378d488b0f0d03cbf84efe058da/C of a862a378d488b0f0d03cbf84efe058da into 2787499212ef4f50a67ac6002f511476(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:30,998 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:30,998 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da., storeName=a862a378d488b0f0d03cbf84efe058da/C, priority=13, startTime=1734439170156; duration=0sec 2024-12-17T12:39:30,998 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:30,999 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a862a378d488b0f0d03cbf84efe058da:C 2024-12-17T12:39:31,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-17T12:39:31,079 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=189, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/25850d8486de4cbf8f8496692e61b05c 2024-12-17T12:39:31,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/2ce13dabc6024257b8a3bca6a9c90fbb is 50, key is test_row_0/B:col10/1734439168524/Put/seqid=0 2024-12-17T12:39:31,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742202_1378 (size=12151) 2024-12-17T12:39:31,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:31,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439231280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:31,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:31,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439231280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:31,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:31,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439231280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:31,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:31,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439231290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:31,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:31,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439231291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:31,489 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/2ce13dabc6024257b8a3bca6a9c90fbb 2024-12-17T12:39:31,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/f6dee84565e34265b789100c58869fab is 50, key is test_row_0/C:col10/1734439168524/Put/seqid=0 2024-12-17T12:39:31,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742203_1379 (size=12151) 2024-12-17T12:39:31,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:31,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439231785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:31,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:31,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439231785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:31,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:31,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439231787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:31,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:31,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439231794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:31,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:31,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439231798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:31,898 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/f6dee84565e34265b789100c58869fab 2024-12-17T12:39:31,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/25850d8486de4cbf8f8496692e61b05c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/25850d8486de4cbf8f8496692e61b05c 2024-12-17T12:39:31,904 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/25850d8486de4cbf8f8496692e61b05c, entries=150, sequenceid=189, filesize=30.4 K 2024-12-17T12:39:31,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/2ce13dabc6024257b8a3bca6a9c90fbb as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/2ce13dabc6024257b8a3bca6a9c90fbb 2024-12-17T12:39:31,908 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/2ce13dabc6024257b8a3bca6a9c90fbb, entries=150, sequenceid=189, filesize=11.9 K
2024-12-17T12:39:31,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/f6dee84565e34265b789100c58869fab as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/f6dee84565e34265b789100c58869fab
2024-12-17T12:39:31,911 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/f6dee84565e34265b789100c58869fab, entries=150, sequenceid=189, filesize=11.9 K
2024-12-17T12:39:31,911 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for a862a378d488b0f0d03cbf84efe058da in 1650ms, sequenceid=189, compaction requested=false
2024-12-17T12:39:31,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da:
2024-12-17T12:39:31,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.
2024-12-17T12:39:31,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103
2024-12-17T12:39:31,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=103
2024-12-17T12:39:31,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102
2024-12-17T12:39:31,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9550 sec
2024-12-17T12:39:31,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 1.9570 sec
2024-12-17T12:39:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102
2024-12-17T12:39:32,060 INFO [Thread-1513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed
2024-12-17T12:39:32,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-17T12:39:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees
2024-12-17T12:39:32,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-12-17T12:39:32,062 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-17T12:39:32,062 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-17T12:39:32,062 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-17T12:39:32,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-12-17T12:39:32,213 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372
2024-12-17T12:39:32,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105
2024-12-17T12:39:32,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.
2024-12-17T12:39:32,214 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB
2024-12-17T12:39:32,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A
2024-12-17T12:39:32,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-17T12:39:32,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B
2024-12-17T12:39:32,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-17T12:39:32,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C
2024-12-17T12:39:32,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-17T12:39:32,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412171d31b999b25041379d8d5eef00e70b91_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439170667/Put/seqid=0
2024-12-17T12:39:32,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742204_1380 (size=12304)
2024-12-17T12:39:32,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-12-17T12:39:32,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:39:32,627 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412171d31b999b25041379d8d5eef00e70b91_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412171d31b999b25041379d8d5eef00e70b91_a862a378d488b0f0d03cbf84efe058da
2024-12-17T12:39:32,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file:
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/8a4b9ca18e7544ee96a590e923de6de1, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/8a4b9ca18e7544ee96a590e923de6de1 is 175, key is test_row_0/A:col10/1734439170667/Put/seqid=0 2024-12-17T12:39:32,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742205_1381 (size=31105) 2024-12-17T12:39:32,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-17T12:39:32,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:32,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:32,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:32,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439232812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:32,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:32,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439232812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:32,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:32,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439232813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:32,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:32,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439232813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:32,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:32,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439232814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:32,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:32,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439232917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:32,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:32,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439232917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:32,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:32,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439232917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:32,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:32,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439232918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:32,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:32,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439232918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,032 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=208, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/8a4b9ca18e7544ee96a590e923de6de1 2024-12-17T12:39:33,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/afebbacb3123435181c727c7430d9fad is 50, key is test_row_0/B:col10/1734439170667/Put/seqid=0 2024-12-17T12:39:33,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742206_1382 (size=12151) 2024-12-17T12:39:33,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439233120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439233120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439233121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439233122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439233122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-17T12:39:33,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439233423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439233423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439233426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439233427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439233428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,442 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/afebbacb3123435181c727c7430d9fad 2024-12-17T12:39:33,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/e319ad539755437b9157cc07538e3f48 is 50, key is test_row_0/C:col10/1734439170667/Put/seqid=0 2024-12-17T12:39:33,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742207_1383 (size=12151) 2024-12-17T12:39:33,859 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/e319ad539755437b9157cc07538e3f48 2024-12-17T12:39:33,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/8a4b9ca18e7544ee96a590e923de6de1 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8a4b9ca18e7544ee96a590e923de6de1 2024-12-17T12:39:33,865 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8a4b9ca18e7544ee96a590e923de6de1, entries=150, sequenceid=208, filesize=30.4 K 
2024-12-17T12:39:33,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/afebbacb3123435181c727c7430d9fad as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/afebbacb3123435181c727c7430d9fad 2024-12-17T12:39:33,869 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/afebbacb3123435181c727c7430d9fad, entries=150, sequenceid=208, filesize=11.9 K 2024-12-17T12:39:33,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/e319ad539755437b9157cc07538e3f48 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e319ad539755437b9157cc07538e3f48 2024-12-17T12:39:33,873 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e319ad539755437b9157cc07538e3f48, entries=150, sequenceid=208, filesize=11.9 K 2024-12-17T12:39:33,873 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for a862a378d488b0f0d03cbf84efe058da in 1659ms, sequenceid=208, compaction requested=true 2024-12-17T12:39:33,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:33,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:33,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-17T12:39:33,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-17T12:39:33,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-17T12:39:33,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8120 sec 2024-12-17T12:39:33,876 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.8150 sec 2024-12-17T12:39:33,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:33,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-17T12:39:33,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:33,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:33,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:33,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:33,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:33,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:33,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217ae1b79650cfc4ee9892fc3152fcc1ccd_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439172811/Put/seqid=0 2024-12-17T12:39:33,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742208_1384 (size=17284) 2024-12-17T12:39:33,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439233942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439233942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439233942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439233946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:33,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:33,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439233946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439234048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439234050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439234050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439234052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439234052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-17T12:39:34,165 INFO [Thread-1513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-17T12:39:34,166 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:34,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-17T12:39:34,167 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:34,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-17T12:39:34,168 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:34,168 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:34,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439234253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439234254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439234254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439234257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439234257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-17T12:39:34,319 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-17T12:39:34,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:34,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:34,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:34,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,323 DEBUG [Thread-1520 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cae6c5c to 127.0.0.1:59557 2024-12-17T12:39:34,323 DEBUG [Thread-1520 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:34,324 DEBUG [Thread-1522 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x433e2b26 to 127.0.0.1:59557 2024-12-17T12:39:34,324 DEBUG [Thread-1522 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:34,324 DEBUG [Thread-1516 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x23ba8092 to 127.0.0.1:59557 2024-12-17T12:39:34,324 DEBUG [Thread-1516 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:34,327 DEBUG [Thread-1514 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a0732aa to 127.0.0.1:59557 2024-12-17T12:39:34,327 DEBUG [Thread-1514 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:34,328 DEBUG [Thread-1518 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c826820 to 127.0.0.1:59557 2024-12-17T12:39:34,328 DEBUG [Thread-1518 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:34,345 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:34,349 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217ae1b79650cfc4ee9892fc3152fcc1ccd_a862a378d488b0f0d03cbf84efe058da to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217ae1b79650cfc4ee9892fc3152fcc1ccd_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:34,350 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/9627a5f31e7641148bf436de1dc0d0ee, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:34,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/9627a5f31e7641148bf436de1dc0d0ee is 175, key is test_row_0/A:col10/1734439172811/Put/seqid=0 2024-12-17T12:39:34,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742209_1385 (size=48389) 2024-12-17T12:39:34,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-17T12:39:34,471 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-17T12:39:34,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:34,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:34,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:34,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439234559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439234559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439234560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439234563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:34,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439234563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,627 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-17T12:39:34,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:34,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:34,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:34,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:34,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,756 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=230, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/9627a5f31e7641148bf436de1dc0d0ee 2024-12-17T12:39:34,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-17T12:39:34,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/da56743ccc4545bda6c07b9163f183b9 is 50, key is test_row_0/B:col10/1734439172811/Put/seqid=0 2024-12-17T12:39:34,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742210_1386 (size=12151) 2024-12-17T12:39:34,783 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-17T12:39:34,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:34,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:34,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:34,784 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,936 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:34,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-17T12:39:34,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:34,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:34,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:34,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:34,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:35,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52970 deadline: 1734439235062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:35,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:35,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52982 deadline: 1734439235063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:35,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:35,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53026 deadline: 1734439235065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:35,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:35,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53016 deadline: 1734439235066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:35,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:35,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53000 deadline: 1734439235067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:35,093 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:35,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-17T12:39:35,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:35,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:35,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:35,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,177 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/da56743ccc4545bda6c07b9163f183b9 2024-12-17T12:39:35,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/6e15a063d96a47f485adae38e8973e4f is 50, key is test_row_0/C:col10/1734439172811/Put/seqid=0 2024-12-17T12:39:35,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742211_1387 (size=12151) 2024-12-17T12:39:35,248 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:35,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-17T12:39:35,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:35,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
as already flushing 2024-12-17T12:39:35,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:35,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-17T12:39:35,401 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:35,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-17T12:39:35,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:35,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:35,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:35,403 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,556 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:35,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-17T12:39:35,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:35,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. as already flushing 2024-12-17T12:39:35,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:35,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:35,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/6e15a063d96a47f485adae38e8973e4f 2024-12-17T12:39:35,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/9627a5f31e7641148bf436de1dc0d0ee as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9627a5f31e7641148bf436de1dc0d0ee 2024-12-17T12:39:35,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9627a5f31e7641148bf436de1dc0d0ee, entries=250, sequenceid=230, filesize=47.3 K 2024-12-17T12:39:35,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/da56743ccc4545bda6c07b9163f183b9 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/da56743ccc4545bda6c07b9163f183b9 2024-12-17T12:39:35,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/da56743ccc4545bda6c07b9163f183b9, entries=150, 
sequenceid=230, filesize=11.9 K 2024-12-17T12:39:35,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/6e15a063d96a47f485adae38e8973e4f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/6e15a063d96a47f485adae38e8973e4f 2024-12-17T12:39:35,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/6e15a063d96a47f485adae38e8973e4f, entries=150, sequenceid=230, filesize=11.9 K 2024-12-17T12:39:35,618 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for a862a378d488b0f0d03cbf84efe058da in 1688ms, sequenceid=230, compaction requested=true 2024-12-17T12:39:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a862a378d488b0f0d03cbf84efe058da:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:39:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a862a378d488b0f0d03cbf84efe058da:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:39:35,618 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:39:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a862a378d488b0f0d03cbf84efe058da:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:39:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:35,618 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:39:35,620 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:39:35,620 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142046 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:39:35,620 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] 
regionserver.HStore(1540): a862a378d488b0f0d03cbf84efe058da/B is initiating minor compaction (all files) 2024-12-17T12:39:35,620 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): a862a378d488b0f0d03cbf84efe058da/A is initiating minor compaction (all files) 2024-12-17T12:39:35,620 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a862a378d488b0f0d03cbf84efe058da/B in TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:35,620 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a862a378d488b0f0d03cbf84efe058da/A in TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:35,620 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/06c88512004f4916929a5d890edf0d42, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/25850d8486de4cbf8f8496692e61b05c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8a4b9ca18e7544ee96a590e923de6de1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9627a5f31e7641148bf436de1dc0d0ee] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp, totalSize=138.7 K 2024-12-17T12:39:35,620 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/24a2975bdcb244338ecf920253e21c0b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/2ce13dabc6024257b8a3bca6a9c90fbb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/afebbacb3123435181c727c7430d9fad, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/da56743ccc4545bda6c07b9163f183b9] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp, totalSize=47.8 K 2024-12-17T12:39:35,620 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:35,620 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/06c88512004f4916929a5d890edf0d42, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/25850d8486de4cbf8f8496692e61b05c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8a4b9ca18e7544ee96a590e923de6de1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9627a5f31e7641148bf436de1dc0d0ee] 2024-12-17T12:39:35,620 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 24a2975bdcb244338ecf920253e21c0b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439167375 2024-12-17T12:39:35,620 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06c88512004f4916929a5d890edf0d42, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439167375 2024-12-17T12:39:35,621 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ce13dabc6024257b8a3bca6a9c90fbb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1734439168524 2024-12-17T12:39:35,621 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25850d8486de4cbf8f8496692e61b05c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1734439168524 2024-12-17T12:39:35,621 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting afebbacb3123435181c727c7430d9fad, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1734439170658 2024-12-17T12:39:35,621 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a4b9ca18e7544ee96a590e923de6de1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1734439170658 2024-12-17T12:39:35,621 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting da56743ccc4545bda6c07b9163f183b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734439172811 2024-12-17T12:39:35,621 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9627a5f31e7641148bf436de1dc0d0ee, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734439172811 2024-12-17T12:39:35,629 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:35,631 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a862a378d488b0f0d03cbf84efe058da#B#compaction#320 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:35,631 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241217c64238a180364b7bb879be07b7b94b93_a862a378d488b0f0d03cbf84efe058da store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:35,631 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/8907cf07bec646c686d28b885a8e2364 is 50, key is test_row_0/B:col10/1734439172811/Put/seqid=0 2024-12-17T12:39:35,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742212_1388 (size=12629) 2024-12-17T12:39:35,638 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241217c64238a180364b7bb879be07b7b94b93_a862a378d488b0f0d03cbf84efe058da, store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:35,638 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217c64238a180364b7bb879be07b7b94b93_a862a378d488b0f0d03cbf84efe058da because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:35,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742213_1389 (size=4469) 2024-12-17T12:39:35,711 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:35,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-17T12:39:35,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:35,712 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-17T12:39:35,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:35,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:35,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:35,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:35,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:35,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:35,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412173bf22eacb59e422c8af3b062c49d72e7_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439173945/Put/seqid=0 2024-12-17T12:39:35,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742214_1390 (size=12304) 2024-12-17T12:39:36,045 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a862a378d488b0f0d03cbf84efe058da#A#compaction#321 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:36,046 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/1b3091d1e1e343b1b4ecdf32f934bb0d is 175, key is test_row_0/A:col10/1734439172811/Put/seqid=0 2024-12-17T12:39:36,046 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/8907cf07bec646c686d28b885a8e2364 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/8907cf07bec646c686d28b885a8e2364 2024-12-17T12:39:36,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742215_1391 (size=31583) 2024-12-17T12:39:36,052 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a862a378d488b0f0d03cbf84efe058da/B of a862a378d488b0f0d03cbf84efe058da into 8907cf07bec646c686d28b885a8e2364(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:36,052 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:36,053 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da., storeName=a862a378d488b0f0d03cbf84efe058da/B, priority=12, startTime=1734439175618; duration=0sec 2024-12-17T12:39:36,053 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:36,053 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a862a378d488b0f0d03cbf84efe058da:B 2024-12-17T12:39:36,053 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:39:36,054 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:39:36,054 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): a862a378d488b0f0d03cbf84efe058da/C is initiating minor compaction (all files) 2024-12-17T12:39:36,054 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a862a378d488b0f0d03cbf84efe058da/C in TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:36,054 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/2787499212ef4f50a67ac6002f511476, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/f6dee84565e34265b789100c58869fab, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e319ad539755437b9157cc07538e3f48, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/6e15a063d96a47f485adae38e8973e4f] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp, totalSize=47.8 K 2024-12-17T12:39:36,055 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 2787499212ef4f50a67ac6002f511476, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734439167375 2024-12-17T12:39:36,055 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting f6dee84565e34265b789100c58869fab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1734439168524 2024-12-17T12:39:36,056 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting e319ad539755437b9157cc07538e3f48, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1734439170658 2024-12-17T12:39:36,056 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e15a063d96a47f485adae38e8973e4f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734439172811 2024-12-17T12:39:36,063 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a862a378d488b0f0d03cbf84efe058da#C#compaction#323 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-17T12:39:36,063 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/95e1fc2ba46f4581b97c20d5c7089185 is 50, key is test_row_0/C:col10/1734439172811/Put/seqid=0 2024-12-17T12:39:36,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:36,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
as already flushing 2024-12-17T12:39:36,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742216_1392 (size=12629) 2024-12-17T12:39:36,066 DEBUG [Thread-1511 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7976087b to 127.0.0.1:59557 2024-12-17T12:39:36,066 DEBUG [Thread-1511 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:36,067 DEBUG [Thread-1503 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41aa6461 to 127.0.0.1:59557 2024-12-17T12:39:36,067 DEBUG [Thread-1503 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:36,074 DEBUG [Thread-1509 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3440b1b1 to 127.0.0.1:59557 2024-12-17T12:39:36,074 DEBUG [Thread-1509 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:36,075 DEBUG [Thread-1505 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x748292ad to 127.0.0.1:59557 2024-12-17T12:39:36,075 DEBUG [Thread-1505 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:36,076 DEBUG [Thread-1507 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76165592 to 127.0.0.1:59557 2024-12-17T12:39:36,076 DEBUG [Thread-1507 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:36,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:36,127 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412173bf22eacb59e422c8af3b062c49d72e7_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412173bf22eacb59e422c8af3b062c49d72e7_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:36,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/851513e70d5842d09e21c1232f181bef, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:36,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/851513e70d5842d09e21c1232f181bef is 175, key is test_row_0/A:col10/1734439173945/Put/seqid=0 2024-12-17T12:39:36,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742217_1393 (size=31105) 2024-12-17T12:39:36,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-17T12:39:36,463 DEBUG 
[RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/1b3091d1e1e343b1b4ecdf32f934bb0d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/1b3091d1e1e343b1b4ecdf32f934bb0d 2024-12-17T12:39:36,470 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a862a378d488b0f0d03cbf84efe058da/A of a862a378d488b0f0d03cbf84efe058da into 1b3091d1e1e343b1b4ecdf32f934bb0d(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:36,470 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:36,470 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da., storeName=a862a378d488b0f0d03cbf84efe058da/A, priority=12, startTime=1734439175618; duration=0sec 2024-12-17T12:39:36,470 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:36,470 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a862a378d488b0f0d03cbf84efe058da:A 2024-12-17T12:39:36,471 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/95e1fc2ba46f4581b97c20d5c7089185 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/95e1fc2ba46f4581b97c20d5c7089185 2024-12-17T12:39:36,476 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a862a378d488b0f0d03cbf84efe058da/C of a862a378d488b0f0d03cbf84efe058da into 95e1fc2ba46f4581b97c20d5c7089185(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:36,476 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:36,476 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da., storeName=a862a378d488b0f0d03cbf84efe058da/C, priority=12, startTime=1734439175618; duration=0sec 2024-12-17T12:39:36,476 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:36,476 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a862a378d488b0f0d03cbf84efe058da:C 2024-12-17T12:39:36,532 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=244, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/851513e70d5842d09e21c1232f181bef 2024-12-17T12:39:36,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/a9f162f3bbaf48ceb1e5409ec5258d4a is 50, key is test_row_0/B:col10/1734439173945/Put/seqid=0 2024-12-17T12:39:36,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742218_1394 (size=12151) 2024-12-17T12:39:36,885 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-17T12:39:36,949 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/a9f162f3bbaf48ceb1e5409ec5258d4a 2024-12-17T12:39:36,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/06506a16e5894869bbdd4d4b4617443b is 50, key is test_row_0/C:col10/1734439173945/Put/seqid=0 2024-12-17T12:39:36,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742219_1395 (size=12151) 2024-12-17T12:39:37,365 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/06506a16e5894869bbdd4d4b4617443b 2024-12-17T12:39:37,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/851513e70d5842d09e21c1232f181bef as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/851513e70d5842d09e21c1232f181bef 2024-12-17T12:39:37,382 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/851513e70d5842d09e21c1232f181bef, entries=150, sequenceid=244, filesize=30.4 K 2024-12-17T12:39:37,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/a9f162f3bbaf48ceb1e5409ec5258d4a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/a9f162f3bbaf48ceb1e5409ec5258d4a 2024-12-17T12:39:37,387 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/a9f162f3bbaf48ceb1e5409ec5258d4a, entries=150, sequenceid=244, filesize=11.9 K 2024-12-17T12:39:37,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/06506a16e5894869bbdd4d4b4617443b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/06506a16e5894869bbdd4d4b4617443b 2024-12-17T12:39:37,392 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/06506a16e5894869bbdd4d4b4617443b, entries=150, sequenceid=244, filesize=11.9 K 2024-12-17T12:39:37,393 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=33.54 KB/34350 for a862a378d488b0f0d03cbf84efe058da in 1681ms, sequenceid=244, compaction requested=false 2024-12-17T12:39:37,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:37,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:37,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-17T12:39:37,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-17T12:39:37,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-17T12:39:37,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2260 sec 2024-12-17T12:39:37,397 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 3.2300 sec 2024-12-17T12:39:38,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-17T12:39:38,277 INFO [Thread-1513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-17T12:39:38,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-17T12:39:38,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44 2024-12-17T12:39:38,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40 2024-12-17T12:39:38,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40 2024-12-17T12:39:38,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37 2024-12-17T12:39:38,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-17T12:39:38,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-17T12:39:38,278 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-17T12:39:38,278 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2831 2024-12-17T12:39:38,278 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8493 rows 2024-12-17T12:39:38,278 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2829 2024-12-17T12:39:38,278 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8486 rows 2024-12-17T12:39:38,278 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2813 2024-12-17T12:39:38,278 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8439 rows 2024-12-17T12:39:38,278 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2817 2024-12-17T12:39:38,278 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8451 rows 2024-12-17T12:39:38,278 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2812 2024-12-17T12:39:38,278 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8433 rows 2024-12-17T12:39:38,278 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-17T12:39:38,278 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x618c6804 to 127.0.0.1:59557 2024-12-17T12:39:38,278 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:39:38,284 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-17T12:39:38,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-17T12:39:38,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:38,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-17T12:39:38,288 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439178288"}]},"ts":"1734439178288"} 2024-12-17T12:39:38,289 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-17T12:39:38,324 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-17T12:39:38,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-17T12:39:38,326 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=110, ppid=109, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a862a378d488b0f0d03cbf84efe058da, UNASSIGN}] 2024-12-17T12:39:38,328 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=110, ppid=109, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a862a378d488b0f0d03cbf84efe058da, UNASSIGN 2024-12-17T12:39:38,329 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=110 updating hbase:meta row=a862a378d488b0f0d03cbf84efe058da, regionState=CLOSING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:38,330 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-17T12:39:38,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; CloseRegionProcedure a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:39:38,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-17T12:39:38,482 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:38,483 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(124): Close a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:38,484 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-17T12:39:38,484 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1681): Closing a862a378d488b0f0d03cbf84efe058da, disabling compactions & flushes 2024-12-17T12:39:38,484 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:38,484 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:38,484 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. after waiting 0 ms 2024-12-17T12:39:38,484 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 
2024-12-17T12:39:38,484 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(2837): Flushing a862a378d488b0f0d03cbf84efe058da 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-17T12:39:38,485 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=A 2024-12-17T12:39:38,485 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:38,485 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=B 2024-12-17T12:39:38,485 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:38,485 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a862a378d488b0f0d03cbf84efe058da, store=C 2024-12-17T12:39:38,486 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:38,494 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412174022e2a2905641f29497d882081b7450_a862a378d488b0f0d03cbf84efe058da is 50, key is test_row_0/A:col10/1734439176066/Put/seqid=0 2024-12-17T12:39:38,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742220_1396 (size=12304) 2024-12-17T12:39:38,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-17T12:39:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-17T12:39:38,899 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:38,904 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412174022e2a2905641f29497d882081b7450_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412174022e2a2905641f29497d882081b7450_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:38,905 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/3cf3115e44aa4665a5929e859570c7eb, store: [table=TestAcidGuarantees family=A region=a862a378d488b0f0d03cbf84efe058da] 2024-12-17T12:39:38,905 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/3cf3115e44aa4665a5929e859570c7eb is 175, key is test_row_0/A:col10/1734439176066/Put/seqid=0 2024-12-17T12:39:38,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742221_1397 (size=31105) 2024-12-17T12:39:39,313 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/3cf3115e44aa4665a5929e859570c7eb 2024-12-17T12:39:39,325 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/8f2201f59ebf4ed0a7e89fa148787c6f is 50, key is test_row_0/B:col10/1734439176066/Put/seqid=0 2024-12-17T12:39:39,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742222_1398 (size=12151) 2024-12-17T12:39:39,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-17T12:39:39,731 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/8f2201f59ebf4ed0a7e89fa148787c6f 2024-12-17T12:39:39,744 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/2d75b4582b8e4f0b89393268a75d96f4 is 50, key is test_row_0/C:col10/1734439176066/Put/seqid=0 2024-12-17T12:39:39,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742223_1399 (size=12151) 2024-12-17T12:39:40,149 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/2d75b4582b8e4f0b89393268a75d96f4 2024-12-17T12:39:40,159 DEBUG 
[RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/A/3cf3115e44aa4665a5929e859570c7eb as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/3cf3115e44aa4665a5929e859570c7eb 2024-12-17T12:39:40,165 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/3cf3115e44aa4665a5929e859570c7eb, entries=150, sequenceid=255, filesize=30.4 K 2024-12-17T12:39:40,166 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/B/8f2201f59ebf4ed0a7e89fa148787c6f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/8f2201f59ebf4ed0a7e89fa148787c6f 2024-12-17T12:39:40,170 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/8f2201f59ebf4ed0a7e89fa148787c6f, entries=150, sequenceid=255, filesize=11.9 K 2024-12-17T12:39:40,171 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/.tmp/C/2d75b4582b8e4f0b89393268a75d96f4 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/2d75b4582b8e4f0b89393268a75d96f4 2024-12-17T12:39:40,175 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/2d75b4582b8e4f0b89393268a75d96f4, entries=150, sequenceid=255, filesize=11.9 K 2024-12-17T12:39:40,176 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for a862a378d488b0f0d03cbf84efe058da in 1692ms, sequenceid=255, compaction requested=true 2024-12-17T12:39:40,177 DEBUG [StoreCloser-TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/79ab10f2f5f941d89bbec4163ca59353, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/f9724ccdc7b044529644451643efdf49, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/cd283b40d9734f1bb8fbd80403049364, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9b74acb9aaea4384b1cfff27da7ddff1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/b3889581d61c4998a49f72246bf6029c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/2ac4d0d833fd4560a6253b8044716d95, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/5fcf78e2573c4fce87ee4ebaa2fe6755, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/15bec26e3bb347c49460f7c11db37eb4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/bdc8034333a14be88514c254c5e88925, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8ee63805475745f1a9525ca90f980c40, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/06c88512004f4916929a5d890edf0d42, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/25850d8486de4cbf8f8496692e61b05c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8a4b9ca18e7544ee96a590e923de6de1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9627a5f31e7641148bf436de1dc0d0ee] to archive 2024-12-17T12:39:40,178 DEBUG [StoreCloser-TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-17T12:39:40,181 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/f9724ccdc7b044529644451643efdf49 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/f9724ccdc7b044529644451643efdf49 2024-12-17T12:39:40,181 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/79ab10f2f5f941d89bbec4163ca59353 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/79ab10f2f5f941d89bbec4163ca59353 2024-12-17T12:39:40,181 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9b74acb9aaea4384b1cfff27da7ddff1 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9b74acb9aaea4384b1cfff27da7ddff1 2024-12-17T12:39:40,181 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/2ac4d0d833fd4560a6253b8044716d95 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/2ac4d0d833fd4560a6253b8044716d95 2024-12-17T12:39:40,181 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/b3889581d61c4998a49f72246bf6029c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/b3889581d61c4998a49f72246bf6029c 2024-12-17T12:39:40,182 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/5fcf78e2573c4fce87ee4ebaa2fe6755 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/5fcf78e2573c4fce87ee4ebaa2fe6755 2024-12-17T12:39:40,182 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/15bec26e3bb347c49460f7c11db37eb4 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/15bec26e3bb347c49460f7c11db37eb4 2024-12-17T12:39:40,182 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/cd283b40d9734f1bb8fbd80403049364 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/cd283b40d9734f1bb8fbd80403049364 2024-12-17T12:39:40,183 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/bdc8034333a14be88514c254c5e88925 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/bdc8034333a14be88514c254c5e88925 2024-12-17T12:39:40,183 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8ee63805475745f1a9525ca90f980c40 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8ee63805475745f1a9525ca90f980c40 2024-12-17T12:39:40,183 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/25850d8486de4cbf8f8496692e61b05c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/25850d8486de4cbf8f8496692e61b05c 2024-12-17T12:39:40,183 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/06c88512004f4916929a5d890edf0d42 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/06c88512004f4916929a5d890edf0d42 2024-12-17T12:39:40,183 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9627a5f31e7641148bf436de1dc0d0ee to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/9627a5f31e7641148bf436de1dc0d0ee 2024-12-17T12:39:40,183 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8a4b9ca18e7544ee96a590e923de6de1 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/8a4b9ca18e7544ee96a590e923de6de1 2024-12-17T12:39:40,184 DEBUG [StoreCloser-TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9c57b6e5d6004733a9f98cf898df0715, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9a662925d596433db10453a74156cabd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/ad86b6f6c46941708bb2fcd361e1825d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/7b51d39a2f804e278fa495626d5a343d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/aa9d3bc965e24dceae1f6b786670c8da, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/fcdbc25e28594b8aabbc73da47b1c734, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/45dd81309808441eaf17f7d0e3a4c8d5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/30c8697974de469d907bed9bb57219c6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/d7f92cabced74b46af4ef56322996c66, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/24a2975bdcb244338ecf920253e21c0b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/63a1865c0f9445eab6481cc459f390da, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/2ce13dabc6024257b8a3bca6a9c90fbb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/afebbacb3123435181c727c7430d9fad, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/da56743ccc4545bda6c07b9163f183b9] to archive 2024-12-17T12:39:40,184 DEBUG [StoreCloser-TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-17T12:39:40,186 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9c57b6e5d6004733a9f98cf898df0715 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9c57b6e5d6004733a9f98cf898df0715 2024-12-17T12:39:40,186 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/7b51d39a2f804e278fa495626d5a343d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/7b51d39a2f804e278fa495626d5a343d 2024-12-17T12:39:40,186 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/ad86b6f6c46941708bb2fcd361e1825d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/ad86b6f6c46941708bb2fcd361e1825d 2024-12-17T12:39:40,186 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/fcdbc25e28594b8aabbc73da47b1c734 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/fcdbc25e28594b8aabbc73da47b1c734 2024-12-17T12:39:40,186 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/aa9d3bc965e24dceae1f6b786670c8da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/aa9d3bc965e24dceae1f6b786670c8da 2024-12-17T12:39:40,186 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9a662925d596433db10453a74156cabd to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/9a662925d596433db10453a74156cabd 2024-12-17T12:39:40,186 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/45dd81309808441eaf17f7d0e3a4c8d5 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/45dd81309808441eaf17f7d0e3a4c8d5 2024-12-17T12:39:40,186 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/30c8697974de469d907bed9bb57219c6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/30c8697974de469d907bed9bb57219c6 2024-12-17T12:39:40,187 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/d7f92cabced74b46af4ef56322996c66 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/d7f92cabced74b46af4ef56322996c66 2024-12-17T12:39:40,187 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/63a1865c0f9445eab6481cc459f390da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/63a1865c0f9445eab6481cc459f390da 2024-12-17T12:39:40,187 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/2ce13dabc6024257b8a3bca6a9c90fbb to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/2ce13dabc6024257b8a3bca6a9c90fbb 2024-12-17T12:39:40,187 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/24a2975bdcb244338ecf920253e21c0b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/24a2975bdcb244338ecf920253e21c0b 2024-12-17T12:39:40,187 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/da56743ccc4545bda6c07b9163f183b9 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/da56743ccc4545bda6c07b9163f183b9 2024-12-17T12:39:40,188 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/afebbacb3123435181c727c7430d9fad to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/afebbacb3123435181c727c7430d9fad 2024-12-17T12:39:40,188 DEBUG [StoreCloser-TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/79596c79a17f4257805d167097862948, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/cbd6805c8742495eabfd2164269ad127, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e34da04e14d24e628da8656ca82427e7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/a83614a577704ecda9a7f5bead4cec7f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/b338e807acfc45e2a93466bc5a1de6dc, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c6aa40e5d56741e78666644da55d6ad6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/85e618f25428426ba147c59ec6072c2d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/0f7333d9ae9844c0bbd27957b2ed192a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/15facc63f3774e69bbde6a67c2dd59b2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/2787499212ef4f50a67ac6002f511476, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c2c50ac842fe44f1af1e1e9bbcc6372b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/f6dee84565e34265b789100c58869fab, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e319ad539755437b9157cc07538e3f48, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/6e15a063d96a47f485adae38e8973e4f] to archive 2024-12-17T12:39:40,189 DEBUG [StoreCloser-TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-17T12:39:40,190 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/cbd6805c8742495eabfd2164269ad127 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/cbd6805c8742495eabfd2164269ad127 2024-12-17T12:39:40,191 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/79596c79a17f4257805d167097862948 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/79596c79a17f4257805d167097862948 2024-12-17T12:39:40,191 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e34da04e14d24e628da8656ca82427e7 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e34da04e14d24e628da8656ca82427e7 2024-12-17T12:39:40,191 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/85e618f25428426ba147c59ec6072c2d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/85e618f25428426ba147c59ec6072c2d 2024-12-17T12:39:40,191 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/a83614a577704ecda9a7f5bead4cec7f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/a83614a577704ecda9a7f5bead4cec7f 2024-12-17T12:39:40,191 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c6aa40e5d56741e78666644da55d6ad6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c6aa40e5d56741e78666644da55d6ad6 2024-12-17T12:39:40,191 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/0f7333d9ae9844c0bbd27957b2ed192a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/0f7333d9ae9844c0bbd27957b2ed192a 2024-12-17T12:39:40,191 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/b338e807acfc45e2a93466bc5a1de6dc to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/b338e807acfc45e2a93466bc5a1de6dc 2024-12-17T12:39:40,192 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/15facc63f3774e69bbde6a67c2dd59b2 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/15facc63f3774e69bbde6a67c2dd59b2 2024-12-17T12:39:40,192 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e319ad539755437b9157cc07538e3f48 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/e319ad539755437b9157cc07538e3f48 2024-12-17T12:39:40,192 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c2c50ac842fe44f1af1e1e9bbcc6372b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/c2c50ac842fe44f1af1e1e9bbcc6372b 2024-12-17T12:39:40,192 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/2787499212ef4f50a67ac6002f511476 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/2787499212ef4f50a67ac6002f511476 2024-12-17T12:39:40,192 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/6e15a063d96a47f485adae38e8973e4f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/6e15a063d96a47f485adae38e8973e4f 2024-12-17T12:39:40,192 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/f6dee84565e34265b789100c58869fab to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/f6dee84565e34265b789100c58869fab 2024-12-17T12:39:40,194 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/recovered.edits/258.seqid, newMaxSeqId=258, maxSeqId=4 2024-12-17T12:39:40,195 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da. 2024-12-17T12:39:40,195 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1635): Region close journal for a862a378d488b0f0d03cbf84efe058da: 2024-12-17T12:39:40,196 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(170): Closed a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,196 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=110 updating hbase:meta row=a862a378d488b0f0d03cbf84efe058da, regionState=CLOSED 2024-12-17T12:39:40,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-17T12:39:40,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; CloseRegionProcedure a862a378d488b0f0d03cbf84efe058da, server=681c08bfdbdf,36491,1734439058372 in 1.8670 sec 2024-12-17T12:39:40,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=110, resume processing ppid=109 2024-12-17T12:39:40,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, ppid=109, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a862a378d488b0f0d03cbf84efe058da, UNASSIGN in 1.8720 sec 2024-12-17T12:39:40,199 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-17T12:39:40,199 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8740 sec 2024-12-17T12:39:40,200 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439180200"}]},"ts":"1734439180200"} 2024-12-17T12:39:40,201 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-17T12:39:40,249 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-17T12:39:40,251 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9640 sec 2024-12-17T12:39:40,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-17T12:39:40,397 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-17T12:39:40,399 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-17T12:39:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:40,402 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=112, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:40,404 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=112, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:40,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-17T12:39:40,405 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,409 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/recovered.edits] 2024-12-17T12:39:40,414 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/1b3091d1e1e343b1b4ecdf32f934bb0d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/1b3091d1e1e343b1b4ecdf32f934bb0d 2024-12-17T12:39:40,414 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/851513e70d5842d09e21c1232f181bef to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/851513e70d5842d09e21c1232f181bef 2024-12-17T12:39:40,414 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/3cf3115e44aa4665a5929e859570c7eb to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/A/3cf3115e44aa4665a5929e859570c7eb 2024-12-17T12:39:40,418 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/8907cf07bec646c686d28b885a8e2364 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/8907cf07bec646c686d28b885a8e2364 2024-12-17T12:39:40,418 DEBUG [HFileArchiver-2 {}] 
backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/8f2201f59ebf4ed0a7e89fa148787c6f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/8f2201f59ebf4ed0a7e89fa148787c6f 2024-12-17T12:39:40,418 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/a9f162f3bbaf48ceb1e5409ec5258d4a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/B/a9f162f3bbaf48ceb1e5409ec5258d4a 2024-12-17T12:39:40,423 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/06506a16e5894869bbdd4d4b4617443b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/06506a16e5894869bbdd4d4b4617443b 2024-12-17T12:39:40,423 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/95e1fc2ba46f4581b97c20d5c7089185 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/95e1fc2ba46f4581b97c20d5c7089185 2024-12-17T12:39:40,423 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/2d75b4582b8e4f0b89393268a75d96f4 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/C/2d75b4582b8e4f0b89393268a75d96f4 2024-12-17T12:39:40,427 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/recovered.edits/258.seqid to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da/recovered.edits/258.seqid 2024-12-17T12:39:40,427 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,427 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-17T12:39:40,428 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-17T12:39:40,429 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-17T12:39:40,440 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412171d31b999b25041379d8d5eef00e70b91_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412171d31b999b25041379d8d5eef00e70b91_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,440 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412170fa31a642f024d3396e63c2dc852b17e_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412170fa31a642f024d3396e63c2dc852b17e_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,440 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412173bf22eacb59e422c8af3b062c49d72e7_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412173bf22eacb59e422c8af3b062c49d72e7_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,440 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412174022e2a2905641f29497d882081b7450_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412174022e2a2905641f29497d882081b7450_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,440 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217444ad12b227b4e3c912f9366d1d5ae2d_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217444ad12b227b4e3c912f9366d1d5ae2d_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,440 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412174c758af4c37d4a12bcf08767a79c8bab_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412174c758af4c37d4a12bcf08767a79c8bab_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,440 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412175191f8459fff42c4b1964602cbc7678b_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412175191f8459fff42c4b1964602cbc7678b_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,440 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412175a0b91d76a6444228b8739db7f60a46b_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412175a0b91d76a6444228b8739db7f60a46b_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,441 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121761b14b755fc24bebb92eb3f8be591633_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121761b14b755fc24bebb92eb3f8be591633_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,441 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412176a8442d0787146d2bfb9288c5b088ca2_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412176a8442d0787146d2bfb9288c5b088ca2_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,441 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217ae1b79650cfc4ee9892fc3152fcc1ccd_a862a378d488b0f0d03cbf84efe058da to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217ae1b79650cfc4ee9892fc3152fcc1ccd_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,441 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121778a542e1245549c6a0ab41da46fb92b9_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121778a542e1245549c6a0ab41da46fb92b9_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,441 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217b52602a748f447d6a09a314e157e1dba_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217b52602a748f447d6a09a314e157e1dba_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,442 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217ecd73cc057db4e838c95d44450a34d58_a862a378d488b0f0d03cbf84efe058da to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217ecd73cc057db4e838c95d44450a34d58_a862a378d488b0f0d03cbf84efe058da 2024-12-17T12:39:40,442 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-17T12:39:40,444 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=112, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:40,445 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-17T12:39:40,446 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-17T12:39:40,447 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=112, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:40,447 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-17T12:39:40,448 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734439180447"}]},"ts":"9223372036854775807"} 2024-12-17T12:39:40,449 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-17T12:39:40,449 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a862a378d488b0f0d03cbf84efe058da, NAME => 'TestAcidGuarantees,,1734439151065.a862a378d488b0f0d03cbf84efe058da.', STARTKEY => '', ENDKEY => ''}] 2024-12-17T12:39:40,449 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-17T12:39:40,449 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734439180449"}]},"ts":"9223372036854775807"} 2024-12-17T12:39:40,450 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-17T12:39:40,458 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=112, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:40,459 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 59 msec 2024-12-17T12:39:40,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-17T12:39:40,505 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-17T12:39:40,517 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=244 (was 245), OpenFileDescriptor=449 (was 447) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=311 (was 341), ProcessCount=11 (was 11), AvailableMemoryMB=3716 (was 3750) 2024-12-17T12:39:40,526 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=311, ProcessCount=11, AvailableMemoryMB=3716 2024-12-17T12:39:40,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-17T12:39:40,528 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-17T12:39:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=113, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-17T12:39:40,530 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=113, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-17T12:39:40,530 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:40,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 113 2024-12-17T12:39:40,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=113 2024-12-17T12:39:40,530 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=113, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-17T12:39:40,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742224_1400 (size=963) 2024-12-17T12:39:40,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=113 2024-12-17T12:39:40,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=113 2024-12-17T12:39:40,940 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9 2024-12-17T12:39:40,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742225_1401 (size=53) 2024-12-17T12:39:40,984 DEBUG [regionserver/681c08bfdbdf:0.Chore.1 {}] throttle.PressureAwareCompactionThroughputController(103): CompactionPressure is 0.0, tune throughput to 50.00 MB/second 2024-12-17T12:39:41,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=113 2024-12-17T12:39:41,354 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:39:41,354 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 224d188997b94bb7d93c906d2c2bf845, disabling compactions & flushes 2024-12-17T12:39:41,354 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:41,354 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:41,354 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. after waiting 0 ms 2024-12-17T12:39:41,354 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:41,354 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:41,354 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:41,357 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=113, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-17T12:39:41,357 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734439181357"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734439181357"}]},"ts":"1734439181357"} 2024-12-17T12:39:41,360 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-17T12:39:41,362 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=113, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-17T12:39:41,362 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439181362"}]},"ts":"1734439181362"} 2024-12-17T12:39:41,364 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-17T12:39:41,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=113, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=224d188997b94bb7d93c906d2c2bf845, ASSIGN}] 2024-12-17T12:39:41,418 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=114, ppid=113, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=224d188997b94bb7d93c906d2c2bf845, ASSIGN 2024-12-17T12:39:41,419 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=114, ppid=113, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=224d188997b94bb7d93c906d2c2bf845, ASSIGN; state=OFFLINE, location=681c08bfdbdf,36491,1734439058372; forceNewPlan=false, retain=false 2024-12-17T12:39:41,570 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=114 updating hbase:meta row=224d188997b94bb7d93c906d2c2bf845, regionState=OPENING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:41,573 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; OpenRegionProcedure 224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:39:41,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=113 2024-12-17T12:39:41,727 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:41,769 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
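Note: the repeated "Checking to see if procedure is done pid=113" entries are the master answering the client's polling for the create-table procedure. A minimal sketch of the client side of that wait, assuming an Admin handle and a built TableDescriptor as in the previous sketch; the method name and timeout value are illustrative.

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

// Submit the create-table procedure and block until the master reports it complete,
// mirroring the "Checking to see if procedure is done pid=113" polling in the log.
void createAndWait(Admin admin, TableDescriptor descriptor) throws Exception {
  Future<Void> procedure = admin.createTableAsync(descriptor);
  procedure.get(5, TimeUnit.MINUTES); // illustrative timeout for the wait
}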
2024-12-17T12:39:41,770 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(7285): Opening region: {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:39:41,771 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:41,771 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:39:41,771 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(7327): checking encryption for 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:41,771 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(7330): checking classloading for 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:41,773 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:41,774 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:39:41,775 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 224d188997b94bb7d93c906d2c2bf845 columnFamilyName A 2024-12-17T12:39:41,775 DEBUG [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:41,775 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] regionserver.HStore(327): Store=224d188997b94bb7d93c906d2c2bf845/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:39:41,775 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:41,777 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:39:41,777 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 224d188997b94bb7d93c906d2c2bf845 columnFamilyName B 2024-12-17T12:39:41,777 DEBUG [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:41,777 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] regionserver.HStore(327): Store=224d188997b94bb7d93c906d2c2bf845/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:39:41,778 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:41,779 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:39:41,779 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 224d188997b94bb7d93c906d2c2bf845 columnFamilyName C 2024-12-17T12:39:41,779 DEBUG [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:39:41,779 INFO [StoreOpener-224d188997b94bb7d93c906d2c2bf845-1 {}] regionserver.HStore(327): Store=224d188997b94bb7d93c906d2c2bf845/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:39:41,780 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:41,780 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:41,780 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:41,781 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-17T12:39:41,782 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(1085): writing seq id for 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:41,784 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-17T12:39:41,784 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(1102): Opened 224d188997b94bb7d93c906d2c2bf845; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66944532, jitterRate=-0.002448737621307373}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-17T12:39:41,784 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(1001): Region open journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:41,785 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., pid=115, masterSystemTime=1734439181727 2024-12-17T12:39:41,786 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:41,786 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
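Note: the region-open sequence above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the TestAcidGuarantees descriptor, so FlushLargeStoresPolicy falls back to the region memstore flush size divided by the number of families (16.0 M). A hedged sketch of setting that table property explicitly via the Admin API is below; the property name is taken from the log message, while the 16 MB value and the method name are illustrative choices, not taken from the test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Set an explicit per-column-family flush lower bound on the table descriptor.
void setPerFamilyFlushLowerBound(Admin admin) throws Exception {
  TableName name = TableName.valueOf("TestAcidGuarantees");
  TableDescriptor current = admin.getDescriptor(name);
  TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
      .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16 * 1024 * 1024)) // illustrative 16 MB bound
      .build();
  admin.modifyTable(updated); // runs a ModifyTableProcedure on the master
}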
2024-12-17T12:39:41,786 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=114 updating hbase:meta row=224d188997b94bb7d93c906d2c2bf845, regionState=OPEN, openSeqNum=2, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:41,788 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-17T12:39:41,788 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; OpenRegionProcedure 224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 in 214 msec 2024-12-17T12:39:41,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=113 2024-12-17T12:39:41,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=113, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=224d188997b94bb7d93c906d2c2bf845, ASSIGN in 372 msec 2024-12-17T12:39:41,789 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=113, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-17T12:39:41,789 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439181789"}]},"ts":"1734439181789"} 2024-12-17T12:39:41,790 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-17T12:39:41,799 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=113, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-17T12:39:41,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2710 sec 2024-12-17T12:39:42,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=113 2024-12-17T12:39:42,639 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 113 completed 2024-12-17T12:39:42,642 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x190853fc to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a9306be 2024-12-17T12:39:42,684 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24f64590, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:42,687 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:42,690 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33232, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:42,692 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-17T12:39:42,693 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42948, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-17T12:39:42,695 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd 2024-12-17T12:39:42,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46114993, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:42,707 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c5c4716 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@465dc764 2024-12-17T12:39:42,716 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a4c53ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:42,717 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2885d2d9 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@cb464a 2024-12-17T12:39:42,725 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68f0be85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:42,726 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22e911df to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78cafade 2024-12-17T12:39:42,732 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@152377d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:42,733 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b727d6e to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14c16cd4 2024-12-17T12:39:42,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a52344f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:42,741 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0341384e to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8ba8425 2024-12-17T12:39:42,749 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a11164b, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:42,750 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26b120d9 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7af61386 2024-12-17T12:39:42,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a7e1dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:42,758 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c1ec7ee to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63e87c8 2024-12-17T12:39:42,766 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31a027db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:42,766 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ccff4bf to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e3d3829 2024-12-17T12:39:42,774 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e934cca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:42,775 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x783a99f7 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f810aa9 2024-12-17T12:39:42,783 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b70f48f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:39:42,785 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:42,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-12-17T12:39:42,786 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:42,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-17T12:39:42,787 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:42,787 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:42,787 DEBUG [hconnection-0x78cafc01-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:42,788 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33236, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:42,790 DEBUG [hconnection-0x4c9c98f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:42,790 DEBUG [hconnection-0x5c48ff7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:42,790 DEBUG [hconnection-0x5986fbb2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:42,791 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:42,791 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33254, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:42,791 DEBUG [hconnection-0x2e2061ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:42,791 DEBUG [hconnection-0x4974d6c6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:42,791 DEBUG [hconnection-0x1486719b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:42,791 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33262, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:42,792 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33276, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:42,792 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33280, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:42,792 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33294, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:42,793 DEBUG [hconnection-0x1412a397-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:42,793 DEBUG [hconnection-0x7758bcb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:39:42,793 DEBUG [hconnection-0x185bbc7e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-17T12:39:42,794 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33308, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:42,794 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:42,795 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33332, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:39:42,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:42,824 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-17T12:39:42,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:42,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:42,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:42,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:42,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:42,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:42,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439242841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:42,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:42,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439242841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439242842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:42,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:42,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439242842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439242842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:42,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/a87fa336362c4c549be8cdcd87a3a2d1 is 50, key is test_row_0/A:col10/1734439182823/Put/seqid=0 2024-12-17T12:39:42,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742226_1402 (size=12001) 2024-12-17T12:39:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-17T12:39:42,938 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:42,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-17T12:39:42,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:42,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:42,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
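Note: the RegionTooBusyException warnings above show puts being rejected while the region's memstore is over its 512.0 K blocking limit during the flush. The HBase client normally retries such failures internally; the sketch below shows an explicit application-level retry with backoff around Table.put, assuming an open Connection. The method name, attempt count and backoff schedule are illustrative, and depending on client retry settings the server-side exception may instead surface wrapped in a retries-exhausted exception rather than directly as shown here.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Retry a put a few times when the region reports it is over its memstore limit.
void putWithBackoff(Connection conn, Put put) throws IOException, InterruptedException {
  try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
    long pauseMs = 100; // illustrative backoff schedule
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) {
        if (!(e instanceof RegionTooBusyException) || attempt >= 5) {
          throw e; // not a busy-region rejection, or out of attempts
        }
        Thread.sleep(pauseMs); // back off, give the flush time to free memstore space
        pauseMs *= 2;
      }
    }
  }
}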
2024-12-17T12:39:42,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:42,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:42,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:42,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:42,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439242943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:42,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:42,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439242943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:42,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439242943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:42,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439242943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:42,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439242943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-17T12:39:43,090 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-17T12:39:43,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:43,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439243146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439243146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439243147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439243147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439243147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,242 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-17T12:39:43,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:43,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,243 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:43,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:43,250 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/a87fa336362c4c549be8cdcd87a3a2d1 2024-12-17T12:39:43,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/56006a5a86b349709635719d584e9db1 is 50, key is test_row_0/B:col10/1734439182823/Put/seqid=0 2024-12-17T12:39:43,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742227_1403 (size=12001) 2024-12-17T12:39:43,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-17T12:39:43,394 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-17T12:39:43,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:43,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,395 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:43,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439243450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439243450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439243452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439243452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439243452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,546 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-17T12:39:43,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:43,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:43,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/56006a5a86b349709635719d584e9db1 2024-12-17T12:39:43,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/3254b4380bbf4091abdc87176505f2e2 is 50, key is test_row_0/C:col10/1734439182823/Put/seqid=0 2024-12-17T12:39:43,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742228_1404 (size=12001) 2024-12-17T12:39:43,698 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-17T12:39:43,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:43,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,699 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:43,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,850 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-17T12:39:43,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:43,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:43,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:43,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-17T12:39:43,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439243951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439243954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439243954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:43,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439243955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:43,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439243957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:44,002 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:44,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-17T12:39:44,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:44,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:44,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:44,003 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:44,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:44,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:44,095 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/3254b4380bbf4091abdc87176505f2e2 2024-12-17T12:39:44,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/a87fa336362c4c549be8cdcd87a3a2d1 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/a87fa336362c4c549be8cdcd87a3a2d1 2024-12-17T12:39:44,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/a87fa336362c4c549be8cdcd87a3a2d1, entries=150, sequenceid=13, filesize=11.7 K 2024-12-17T12:39:44,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/56006a5a86b349709635719d584e9db1 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/56006a5a86b349709635719d584e9db1 2024-12-17T12:39:44,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/56006a5a86b349709635719d584e9db1, entries=150, sequenceid=13, 
filesize=11.7 K 2024-12-17T12:39:44,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/3254b4380bbf4091abdc87176505f2e2 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/3254b4380bbf4091abdc87176505f2e2 2024-12-17T12:39:44,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/3254b4380bbf4091abdc87176505f2e2, entries=150, sequenceid=13, filesize=11.7 K 2024-12-17T12:39:44,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 224d188997b94bb7d93c906d2c2bf845 in 1285ms, sequenceid=13, compaction requested=false 2024-12-17T12:39:44,109 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-17T12:39:44,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:44,155 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:44,155 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-17T12:39:44,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:44,155 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-17T12:39:44,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:44,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:44,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:44,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:44,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:44,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:44,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/33a062d3b0424d658c01bff6ba8a9418 is 50, key is test_row_0/A:col10/1734439182840/Put/seqid=0 2024-12-17T12:39:44,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742229_1405 (size=12001) 2024-12-17T12:39:44,562 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/33a062d3b0424d658c01bff6ba8a9418 2024-12-17T12:39:44,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/e7a7c90af26b4c75a8bbc8a780367a4e is 50, key is test_row_0/B:col10/1734439182840/Put/seqid=0 2024-12-17T12:39:44,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742230_1406 (size=12001) 2024-12-17T12:39:44,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-17T12:39:44,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:44,960 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:44,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:44,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439244965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:44,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:44,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439244966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:44,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:44,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439244967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:44,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:44,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439244968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:44,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:44,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439244969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:44,988 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/e7a7c90af26b4c75a8bbc8a780367a4e 2024-12-17T12:39:44,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/89945f84eb2f4131a72ec63e3cd21c28 is 50, key is test_row_0/C:col10/1734439182840/Put/seqid=0 2024-12-17T12:39:44,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742231_1407 (size=12001) 2024-12-17T12:39:45,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439245070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439245070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439245072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439245072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439245273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439245273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439245275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439245276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,398 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/89945f84eb2f4131a72ec63e3cd21c28 2024-12-17T12:39:45,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/33a062d3b0424d658c01bff6ba8a9418 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/33a062d3b0424d658c01bff6ba8a9418 2024-12-17T12:39:45,404 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/33a062d3b0424d658c01bff6ba8a9418, entries=150, sequenceid=39, filesize=11.7 K 2024-12-17T12:39:45,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/e7a7c90af26b4c75a8bbc8a780367a4e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/e7a7c90af26b4c75a8bbc8a780367a4e 2024-12-17T12:39:45,407 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/e7a7c90af26b4c75a8bbc8a780367a4e, entries=150, sequenceid=39, filesize=11.7 K 2024-12-17T12:39:45,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/89945f84eb2f4131a72ec63e3cd21c28 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/89945f84eb2f4131a72ec63e3cd21c28 2024-12-17T12:39:45,410 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/89945f84eb2f4131a72ec63e3cd21c28, entries=150, sequenceid=39, filesize=11.7 K 2024-12-17T12:39:45,411 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 224d188997b94bb7d93c906d2c2bf845 in 1256ms, sequenceid=39, compaction requested=false 2024-12-17T12:39:45,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:45,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:45,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-12-17T12:39:45,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-12-17T12:39:45,413 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-17T12:39:45,413 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6250 sec 2024-12-17T12:39:45,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 2.6280 sec 2024-12-17T12:39:45,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:45,578 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-17T12:39:45,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:45,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:45,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:45,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:45,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:45,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:45,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/8d2fa36e38914f0ba9708d585fd7db79 is 50, key is test_row_0/A:col10/1734439185578/Put/seqid=0 2024-12-17T12:39:45,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742232_1408 (size=16681) 2024-12-17T12:39:45,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439245598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439245599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439245602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439245606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439245707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439245707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439245707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439245711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,716 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-17T12:39:45,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439245911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439245910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439245911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:45,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439245914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:45,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/8d2fa36e38914f0ba9708d585fd7db79 2024-12-17T12:39:45,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/55f9c369db4342baaa8e955e1b3f6823 is 50, key is test_row_0/B:col10/1734439185578/Put/seqid=0 2024-12-17T12:39:45,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742233_1409 (size=12001) 2024-12-17T12:39:45,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/55f9c369db4342baaa8e955e1b3f6823 2024-12-17T12:39:46,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/d290113bf1cf4f79956963b7cb9da415 is 50, key is test_row_0/C:col10/1734439185578/Put/seqid=0 2024-12-17T12:39:46,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742234_1410 (size=12001) 2024-12-17T12:39:46,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439246217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439246217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439246218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439246221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/d290113bf1cf4f79956963b7cb9da415 2024-12-17T12:39:46,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/8d2fa36e38914f0ba9708d585fd7db79 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8d2fa36e38914f0ba9708d585fd7db79 2024-12-17T12:39:46,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8d2fa36e38914f0ba9708d585fd7db79, entries=250, sequenceid=52, filesize=16.3 K 2024-12-17T12:39:46,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/55f9c369db4342baaa8e955e1b3f6823 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/55f9c369db4342baaa8e955e1b3f6823 2024-12-17T12:39:46,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/55f9c369db4342baaa8e955e1b3f6823, entries=150, sequenceid=52, filesize=11.7 K 2024-12-17T12:39:46,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/d290113bf1cf4f79956963b7cb9da415 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d290113bf1cf4f79956963b7cb9da415 2024-12-17T12:39:46,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d290113bf1cf4f79956963b7cb9da415, entries=150, sequenceid=52, filesize=11.7 K 2024-12-17T12:39:46,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 224d188997b94bb7d93c906d2c2bf845 in 841ms, sequenceid=52, compaction requested=true 2024-12-17T12:39:46,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:46,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:39:46,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:46,419 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:46,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:39:46,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:46,419 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:46,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:39:46,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:46,420 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:46,420 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:46,420 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/B is initiating minor compaction (all files) 2024-12-17T12:39:46,420 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/A is initiating minor compaction (all files) 2024-12-17T12:39:46,420 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/A in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:46,420 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/B in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:46,420 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/a87fa336362c4c549be8cdcd87a3a2d1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/33a062d3b0424d658c01bff6ba8a9418, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8d2fa36e38914f0ba9708d585fd7db79] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=39.7 K 2024-12-17T12:39:46,420 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/56006a5a86b349709635719d584e9db1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/e7a7c90af26b4c75a8bbc8a780367a4e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/55f9c369db4342baaa8e955e1b3f6823] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=35.2 K 2024-12-17T12:39:46,420 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting a87fa336362c4c549be8cdcd87a3a2d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734439182795 2024-12-17T12:39:46,420 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 56006a5a86b349709635719d584e9db1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734439182795 2024-12-17T12:39:46,420 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting e7a7c90af26b4c75a8bbc8a780367a4e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734439182840 2024-12-17T12:39:46,420 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33a062d3b0424d658c01bff6ba8a9418, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734439182840 2024-12-17T12:39:46,421 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 55f9c369db4342baaa8e955e1b3f6823, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734439184965 2024-12-17T12:39:46,421 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d2fa36e38914f0ba9708d585fd7db79, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734439184964 
2024-12-17T12:39:46,426 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#B#compaction#339 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:46,426 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#A#compaction#338 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:46,426 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/ec832ffafd0f4618be3cde89daba1863 is 50, key is test_row_0/A:col10/1734439185578/Put/seqid=0 2024-12-17T12:39:46,426 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/276f2fc55a624f508c49cc328562b3a8 is 50, key is test_row_0/B:col10/1734439185578/Put/seqid=0 2024-12-17T12:39:46,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742236_1412 (size=12104) 2024-12-17T12:39:46,438 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/276f2fc55a624f508c49cc328562b3a8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/276f2fc55a624f508c49cc328562b3a8 2024-12-17T12:39:46,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742235_1411 (size=12104) 2024-12-17T12:39:46,442 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/B of 224d188997b94bb7d93c906d2c2bf845 into 276f2fc55a624f508c49cc328562b3a8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:46,442 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:46,442 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/B, priority=13, startTime=1734439186419; duration=0sec 2024-12-17T12:39:46,442 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:46,442 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:B 2024-12-17T12:39:46,442 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:46,443 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:46,443 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/C is initiating minor compaction (all files) 2024-12-17T12:39:46,443 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/C in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:46,443 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/3254b4380bbf4091abdc87176505f2e2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/89945f84eb2f4131a72ec63e3cd21c28, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d290113bf1cf4f79956963b7cb9da415] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=35.2 K 2024-12-17T12:39:46,443 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 3254b4380bbf4091abdc87176505f2e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734439182795 2024-12-17T12:39:46,443 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 89945f84eb2f4131a72ec63e3cd21c28, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734439182840 2024-12-17T12:39:46,444 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d290113bf1cf4f79956963b7cb9da415, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734439184965 2024-12-17T12:39:46,449 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
224d188997b94bb7d93c906d2c2bf845#C#compaction#340 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:46,449 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/24a20b0a1975479d84c2ad9cf7ac9639 is 50, key is test_row_0/C:col10/1734439185578/Put/seqid=0 2024-12-17T12:39:46,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742237_1413 (size=12104) 2024-12-17T12:39:46,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:46,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-17T12:39:46,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:46,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:46,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:46,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:46,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:46,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:46,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/d0469768b89448faa13e43a15571d1cf is 50, key is test_row_0/A:col10/1734439186727/Put/seqid=0 2024-12-17T12:39:46,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742238_1414 (size=12001) 2024-12-17T12:39:46,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439246779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439246788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439246787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439246789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,845 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/ec832ffafd0f4618be3cde89daba1863 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/ec832ffafd0f4618be3cde89daba1863 2024-12-17T12:39:46,848 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/A of 224d188997b94bb7d93c906d2c2bf845 into ec832ffafd0f4618be3cde89daba1863(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:46,848 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:46,848 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/A, priority=13, startTime=1734439186419; duration=0sec 2024-12-17T12:39:46,848 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:46,848 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:A 2024-12-17T12:39:46,856 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/24a20b0a1975479d84c2ad9cf7ac9639 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/24a20b0a1975479d84c2ad9cf7ac9639 2024-12-17T12:39:46,859 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/C of 224d188997b94bb7d93c906d2c2bf845 into 24a20b0a1975479d84c2ad9cf7ac9639(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:46,859 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:46,859 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/C, priority=13, startTime=1734439186419; duration=0sec 2024-12-17T12:39:46,859 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:46,859 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:C 2024-12-17T12:39:46,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-17T12:39:46,890 INFO [Thread-1819 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-17T12:39:46,891 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:46,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439246890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-17T12:39:46,892 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:46,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-17T12:39:46,892 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:46,892 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:46,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439246893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439246893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439246893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:46,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439246989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:46,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-17T12:39:46,993 DEBUG [Thread-1811 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., hostname=681c08bfdbdf,36491,1734439058372, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:39:47,043 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-17T12:39:47,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:47,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
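
[Editor's note] The RpcRetryingCallerImpl entry above (tries=6, retries=16) shows the client library itself retrying writes that bounce off the RegionTooBusyException backpressure. The following is a minimal illustrative sketch, not part of the test output, of how an application could handle the same exception manually around Table.put(); the table name, row, family and column come from the log, while the retry counts and backoff values are assumptions chosen for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackpressureAwareWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // assumed starting backoff
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);                   // may fail while the memstore is over its blocking limit
          return;                           // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);          // give the region time to flush
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new java.io.IOException("region stayed too busy after 10 attempts");
    }
  }
}

In the test itself this loop is unnecessary; HTable.put() already delegates to RpcRetryingCallerImpl, which is exactly what the "Call exception, tries=6, retries=16" line records.
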
2024-12-17T12:39:47,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439247092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439247097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439247097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439247097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,179 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/d0469768b89448faa13e43a15571d1cf 2024-12-17T12:39:47,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/7d0031d838514968a52058de8a040799 is 50, key is test_row_0/B:col10/1734439186727/Put/seqid=0 2024-12-17T12:39:47,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742239_1415 (size=12001) 2024-12-17T12:39:47,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-17T12:39:47,195 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-17T12:39:47,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:47,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
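
[Editor's note] The repeated "Over memstore limit=512.0 K" warnings come from HRegion.checkResources(), which blocks updates once a region's memstore exceeds the flush size multiplied by the block multiplier. The sketch below only illustrates that arithmetic; the property names are real HBase settings, but the 128 KB flush size is an assumption picked so the product matches the 512 K figure in this log (the actual test configuration may differ).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitMath {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // assumed test value
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);     // HBase default
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;                    // 512 KB with the values above
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}

Once the MemStoreFlusher entries above finish writing the .tmp HFiles for families A, B and C, the memstore drops back under this limit and the blocked mutations start succeeding again.
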
2024-12-17T12:39:47,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,347 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-17T12:39:47,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:47,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,348 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439247397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439247402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439247404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439247404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-17T12:39:47,499 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-17T12:39:47,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:47,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
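
[Editor's note] The pid=118/119 activity above is a master-driven table flush: FlushTableProcedure fans out a FlushRegionProcedure, and the regionserver keeps rejecting it with "Unable to complete flush ... as already flushing" until the in-progress MemStoreFlusher run finishes, so the master re-dispatches it. A minimal sketch of the client call that triggers this path is below; it is illustrative only, using the table name from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits the flush to the master and waits for the table-level procedure to complete,
      // matching the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees" line.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
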
2024-12-17T12:39:47,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/7d0031d838514968a52058de8a040799 2024-12-17T12:39:47,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/b8281b1e25cf49daacbf5b41dcda10fb is 50, key is test_row_0/C:col10/1734439186727/Put/seqid=0 2024-12-17T12:39:47,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742240_1416 (size=12001) 2024-12-17T12:39:47,651 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-17T12:39:47,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:47,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:47,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,804 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-17T12:39:47,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:47,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439247900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439247907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439247908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:47,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439247910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,956 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:47,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-17T12:39:47,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:47,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:47,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
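The RegionTooBusyException warnings above come from HRegion.checkResources: once the region's memstore passes its blocking limit (reported here as 512.0 K), new mutations are rejected until the pending flush drains it. The stock client retries these internally; an application that sees them surface can apply the same idea itself. The sketch below is a rough illustration of that pattern, not code from this test, and the table name and backoff values are assumptions.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class PutWithRetry {
        // Retry a put a few times when the region server rejects it, e.g. with a
        // RegionTooBusyException while the memstore is over its blocking limit.
        static void putWithRetry(Connection conn, Put put) throws Exception {
            try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                IOException last = null;
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        return;
                    } catch (IOException e) {
                        last = e;
                        Thread.sleep(200L * (attempt + 1)); // simple linear backoff
                    }
                }
                throw last;
            }
        }
    }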
2024-12-17T12:39:47,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:47,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-17T12:39:47,998 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/b8281b1e25cf49daacbf5b41dcda10fb 2024-12-17T12:39:48,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/d0469768b89448faa13e43a15571d1cf as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/d0469768b89448faa13e43a15571d1cf 2024-12-17T12:39:48,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/d0469768b89448faa13e43a15571d1cf, entries=150, sequenceid=78, filesize=11.7 K 2024-12-17T12:39:48,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/7d0031d838514968a52058de8a040799 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/7d0031d838514968a52058de8a040799 2024-12-17T12:39:48,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/7d0031d838514968a52058de8a040799, entries=150, sequenceid=78, filesize=11.7 K 2024-12-17T12:39:48,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/b8281b1e25cf49daacbf5b41dcda10fb as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/b8281b1e25cf49daacbf5b41dcda10fb 2024-12-17T12:39:48,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/b8281b1e25cf49daacbf5b41dcda10fb, entries=150, sequenceid=78, filesize=11.7 K 2024-12-17T12:39:48,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 224d188997b94bb7d93c906d2c2bf845 in 1284ms, sequenceid=78, compaction requested=false 2024-12-17T12:39:48,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:48,108 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:48,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-17T12:39:48,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:48,108 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-17T12:39:48,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:48,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:48,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:48,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:48,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:48,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:48,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/90e5e08f76e3457f8dd43ec6d45ce580 is 50, key is test_row_0/A:col10/1734439186777/Put/seqid=0 2024-12-17T12:39:48,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742241_1417 (size=12001) 2024-12-17T12:39:48,516 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/90e5e08f76e3457f8dd43ec6d45ce580 2024-12-17T12:39:48,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/1b680e3efd5d48b2a1cded5f9a3161a5 is 50, key is test_row_0/B:col10/1734439186777/Put/seqid=0 2024-12-17T12:39:48,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742242_1418 (size=12001) 2024-12-17T12:39:48,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:48,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:48,924 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/1b680e3efd5d48b2a1cded5f9a3161a5 2024-12-17T12:39:48,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/4f9e4c104deb4ed6b96b668bd4a2823b is 50, key is test_row_0/C:col10/1734439186777/Put/seqid=0 2024-12-17T12:39:48,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742243_1419 (size=12001) 2024-12-17T12:39:48,949 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/4f9e4c104deb4ed6b96b668bd4a2823b 2024-12-17T12:39:48,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:48,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439248944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:48,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/90e5e08f76e3457f8dd43ec6d45ce580 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/90e5e08f76e3457f8dd43ec6d45ce580 2024-12-17T12:39:48,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:48,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439248947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:48,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:48,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439248948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:48,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:48,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439248949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:48,955 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/90e5e08f76e3457f8dd43ec6d45ce580, entries=150, sequenceid=91, filesize=11.7 K 2024-12-17T12:39:48,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/1b680e3efd5d48b2a1cded5f9a3161a5 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1b680e3efd5d48b2a1cded5f9a3161a5 2024-12-17T12:39:48,958 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1b680e3efd5d48b2a1cded5f9a3161a5, entries=150, sequenceid=91, filesize=11.7 K 2024-12-17T12:39:48,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/4f9e4c104deb4ed6b96b668bd4a2823b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/4f9e4c104deb4ed6b96b668bd4a2823b 2024-12-17T12:39:48,962 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/4f9e4c104deb4ed6b96b668bd4a2823b, entries=150, sequenceid=91, filesize=11.7 K 2024-12-17T12:39:48,963 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 224d188997b94bb7d93c906d2c2bf845 in 855ms, sequenceid=91, 
compaction requested=true 2024-12-17T12:39:48,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:48,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:48,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-17T12:39:48,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-17T12:39:48,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-17T12:39:48,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0720 sec 2024-12-17T12:39:48,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.0740 sec 2024-12-17T12:39:48,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-17T12:39:48,996 INFO [Thread-1819 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-17T12:39:48,998 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:48,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-17T12:39:48,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-17T12:39:48,999 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:48,999 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:48,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:49,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:49,052 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-17T12:39:49,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): 
FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:49,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:49,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:49,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:49,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:49,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:49,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/16496e7c2b3344189cd3a6a89575dca4 is 50, key is test_row_0/A:col10/1734439188948/Put/seqid=0 2024-12-17T12:39:49,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742244_1420 (size=14341) 2024-12-17T12:39:49,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439249060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439249086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439249086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439249086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-17T12:39:49,151 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-17T12:39:49,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:49,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
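The 512.0 K figure in the "Over memstore limit" messages is the per-region blocking limit: hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The stock defaults are 128 MB and 4; this test presumably shrinks the flush size so that flushes and write blocking trigger quickly. A sketch of the two settings involved (the values below are the defaults, shown for orientation, not the values this test actually uses):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
        static Configuration defaults() {
            Configuration conf = HBaseConfiguration.create();
            // Size at which a memstore is flushed to a new HFile (default 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Writes fail with RegionTooBusyException once the memstore reaches
            // flush.size * block.multiplier (default multiplier 4).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }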
2024-12-17T12:39:49,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439249187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439249191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439249191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439249191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-17T12:39:49,303 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-17T12:39:49,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:49,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439249391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439249393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439249394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439249394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,455 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-17T12:39:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/16496e7c2b3344189cd3a6a89575dca4 2024-12-17T12:39:49,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/f46dbdaebd9441fa929d3f9046bb8ffe is 50, key is test_row_0/B:col10/1734439188948/Put/seqid=0 2024-12-17T12:39:49,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742245_1421 (size=12001) 2024-12-17T12:39:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-17T12:39:49,607 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-17T12:39:49,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:49,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439249698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439249700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439249700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439249700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,759 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-17T12:39:49,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:49,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:49,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/f46dbdaebd9441fa929d3f9046bb8ffe 2024-12-17T12:39:49,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/617c5de5fe0d4da1a7f08818e4ce99b8 is 50, key is test_row_0/C:col10/1734439188948/Put/seqid=0 2024-12-17T12:39:49,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742246_1422 (size=12001) 2024-12-17T12:39:49,911 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:49,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-17T12:39:49,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:49,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:49,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:49,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:49,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:50,064 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:50,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-17T12:39:50,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:50,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:50,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:50,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:50,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:50,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:50,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-17T12:39:50,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:50,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439250202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:50,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:50,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439250204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:50,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:50,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439250205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:50,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:50,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439250209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:50,216 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:50,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-17T12:39:50,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:50,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:50,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:50,216 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:50,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:50,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:50,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/617c5de5fe0d4da1a7f08818e4ce99b8 2024-12-17T12:39:50,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/16496e7c2b3344189cd3a6a89575dca4 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/16496e7c2b3344189cd3a6a89575dca4 2024-12-17T12:39:50,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/16496e7c2b3344189cd3a6a89575dca4, entries=200, sequenceid=117, filesize=14.0 K 2024-12-17T12:39:50,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/f46dbdaebd9441fa929d3f9046bb8ffe as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f46dbdaebd9441fa929d3f9046bb8ffe 2024-12-17T12:39:50,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f46dbdaebd9441fa929d3f9046bb8ffe, entries=150, sequenceid=117, filesize=11.7 K 2024-12-17T12:39:50,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/617c5de5fe0d4da1a7f08818e4ce99b8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/617c5de5fe0d4da1a7f08818e4ce99b8 2024-12-17T12:39:50,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/617c5de5fe0d4da1a7f08818e4ce99b8, entries=150, sequenceid=117, filesize=11.7 K 2024-12-17T12:39:50,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 224d188997b94bb7d93c906d2c2bf845 in 1260ms, sequenceid=117, compaction requested=true 2024-12-17T12:39:50,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:50,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
224d188997b94bb7d93c906d2c2bf845:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:39:50,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:50,312 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:39:50,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:39:50,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:50,312 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:39:50,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:39:50,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:50,313 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:39:50,313 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50447 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:39:50,313 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/A is initiating minor compaction (all files) 2024-12-17T12:39:50,313 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/B is initiating minor compaction (all files) 2024-12-17T12:39:50,313 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/A in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:50,313 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/B in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:50,313 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/ec832ffafd0f4618be3cde89daba1863, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/d0469768b89448faa13e43a15571d1cf, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/90e5e08f76e3457f8dd43ec6d45ce580, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/16496e7c2b3344189cd3a6a89575dca4] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=49.3 K 2024-12-17T12:39:50,313 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/276f2fc55a624f508c49cc328562b3a8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/7d0031d838514968a52058de8a040799, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1b680e3efd5d48b2a1cded5f9a3161a5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f46dbdaebd9441fa929d3f9046bb8ffe] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=47.0 K 2024-12-17T12:39:50,313 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 276f2fc55a624f508c49cc328562b3a8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734439184965 2024-12-17T12:39:50,313 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec832ffafd0f4618be3cde89daba1863, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734439184965 2024-12-17T12:39:50,313 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0469768b89448faa13e43a15571d1cf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734439185598 2024-12-17T12:39:50,313 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d0031d838514968a52058de8a040799, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734439185598 2024-12-17T12:39:50,314 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90e5e08f76e3457f8dd43ec6d45ce580, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734439186777 2024-12-17T12:39:50,314 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 
1b680e3efd5d48b2a1cded5f9a3161a5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734439186777 2024-12-17T12:39:50,314 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16496e7c2b3344189cd3a6a89575dca4, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734439188937 2024-12-17T12:39:50,314 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting f46dbdaebd9441fa929d3f9046bb8ffe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734439188943 2024-12-17T12:39:50,319 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#A#compaction#351 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:50,319 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#B#compaction#350 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:50,319 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/6cb2d1a04d53475dbbad3311491bbd72 is 50, key is test_row_0/B:col10/1734439188948/Put/seqid=0 2024-12-17T12:39:50,319 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/222a84a8c61a4488ae779a341aca1b3f is 50, key is test_row_0/A:col10/1734439188948/Put/seqid=0 2024-12-17T12:39:50,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742248_1424 (size=12241) 2024-12-17T12:39:50,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742247_1423 (size=12241) 2024-12-17T12:39:50,368 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:50,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-17T12:39:50,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:50,369 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-17T12:39:50,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:50,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:50,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:50,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:50,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:50,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:50,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/35b3a5b80fef445b91d002645010a799 is 50, key is test_row_1/A:col10/1734439189067/Put/seqid=0 2024-12-17T12:39:50,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742249_1425 (size=9657) 2024-12-17T12:39:50,726 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/222a84a8c61a4488ae779a341aca1b3f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/222a84a8c61a4488ae779a341aca1b3f 2024-12-17T12:39:50,727 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/6cb2d1a04d53475dbbad3311491bbd72 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/6cb2d1a04d53475dbbad3311491bbd72 2024-12-17T12:39:50,730 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/B of 224d188997b94bb7d93c906d2c2bf845 into 6cb2d1a04d53475dbbad3311491bbd72(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:50,730 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/A of 224d188997b94bb7d93c906d2c2bf845 into 222a84a8c61a4488ae779a341aca1b3f(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:50,730 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:50,730 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:50,730 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/A, priority=12, startTime=1734439190312; duration=0sec 2024-12-17T12:39:50,730 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/B, priority=12, startTime=1734439190312; duration=0sec 2024-12-17T12:39:50,731 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:50,731 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:A 2024-12-17T12:39:50,731 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:50,731 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:B 2024-12-17T12:39:50,731 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:39:50,732 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:39:50,732 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/C is initiating minor compaction (all files) 2024-12-17T12:39:50,732 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/C in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:50,732 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/24a20b0a1975479d84c2ad9cf7ac9639, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/b8281b1e25cf49daacbf5b41dcda10fb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/4f9e4c104deb4ed6b96b668bd4a2823b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/617c5de5fe0d4da1a7f08818e4ce99b8] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=47.0 K 2024-12-17T12:39:50,732 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24a20b0a1975479d84c2ad9cf7ac9639, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734439184965 2024-12-17T12:39:50,732 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8281b1e25cf49daacbf5b41dcda10fb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734439185598 2024-12-17T12:39:50,732 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f9e4c104deb4ed6b96b668bd4a2823b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734439186777 2024-12-17T12:39:50,733 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 617c5de5fe0d4da1a7f08818e4ce99b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734439188943 2024-12-17T12:39:50,738 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#C#compaction#353 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:50,739 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/c8bfa1dfe76842e5bc7aad2f69bef2aa is 50, key is test_row_0/C:col10/1734439188948/Put/seqid=0 2024-12-17T12:39:50,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742250_1426 (size=12241) 2024-12-17T12:39:50,776 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/35b3a5b80fef445b91d002645010a799 2024-12-17T12:39:50,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/f85bf8efec7941779918a15108160606 is 50, key is test_row_1/B:col10/1734439189067/Put/seqid=0 2024-12-17T12:39:50,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742251_1427 (size=9657) 2024-12-17T12:39:50,785 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/f85bf8efec7941779918a15108160606 2024-12-17T12:39:50,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/af0c8ab25a9c4855aa509703cc206946 is 50, key is test_row_1/C:col10/1734439189067/Put/seqid=0 2024-12-17T12:39:50,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742252_1428 (size=9657) 2024-12-17T12:39:51,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:51,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:51,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-17T12:39:51,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:51,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439251109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:51,145 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/c8bfa1dfe76842e5bc7aad2f69bef2aa as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/c8bfa1dfe76842e5bc7aad2f69bef2aa 2024-12-17T12:39:51,149 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/C of 224d188997b94bb7d93c906d2c2bf845 into c8bfa1dfe76842e5bc7aad2f69bef2aa(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:51,149 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:51,149 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/C, priority=12, startTime=1734439190312; duration=0sec 2024-12-17T12:39:51,149 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:51,149 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:C 2024-12-17T12:39:51,200 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/af0c8ab25a9c4855aa509703cc206946 2024-12-17T12:39:51,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/35b3a5b80fef445b91d002645010a799 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/35b3a5b80fef445b91d002645010a799 2024-12-17T12:39:51,211 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/35b3a5b80fef445b91d002645010a799, entries=100, sequenceid=127, filesize=9.4 K 2024-12-17T12:39:51,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/f85bf8efec7941779918a15108160606 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f85bf8efec7941779918a15108160606 2024-12-17T12:39:51,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:51,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439251209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:51,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:51,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439251209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:51,215 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f85bf8efec7941779918a15108160606, entries=100, sequenceid=127, filesize=9.4 K 2024-12-17T12:39:51,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:51,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439251212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:51,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:51,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439251212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/af0c8ab25a9c4855aa509703cc206946 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/af0c8ab25a9c4855aa509703cc206946 2024-12-17T12:39:51,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:51,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439251215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:51,219 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/af0c8ab25a9c4855aa509703cc206946, entries=100, sequenceid=127, filesize=9.4 K 2024-12-17T12:39:51,220 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 224d188997b94bb7d93c906d2c2bf845 in 851ms, sequenceid=127, compaction requested=false 2024-12-17T12:39:51,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:51,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:51,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-17T12:39:51,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-17T12:39:51,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-17T12:39:51,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2220 sec 2024-12-17T12:39:51,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.2240 sec 2024-12-17T12:39:51,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:51,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-17T12:39:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:51,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:51,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:51,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:51,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:51,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:51,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/fcff3fbc84e040d38ed0d7dd0707f1a8 is 50, key is test_row_0/A:col10/1734439191109/Put/seqid=0 2024-12-17T12:39:51,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742253_1429 (size=12151) 2024-12-17T12:39:51,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:51,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439251436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:51,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:51,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439251538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:51,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:51,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439251741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:51,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/fcff3fbc84e040d38ed0d7dd0707f1a8 2024-12-17T12:39:51,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/1971ae45c0ed43469fa2295c48d9c13f is 50, key is test_row_0/B:col10/1734439191109/Put/seqid=0 2024-12-17T12:39:51,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742254_1430 (size=12151) 2024-12-17T12:39:52,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:52,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439252046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:52,267 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/1971ae45c0ed43469fa2295c48d9c13f 2024-12-17T12:39:52,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/bfabe93df41d46cca4be4b2aa09b9dcd is 50, key is test_row_0/C:col10/1734439191109/Put/seqid=0 2024-12-17T12:39:52,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742255_1431 (size=12151) 2024-12-17T12:39:52,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:52,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439252549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:52,676 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/bfabe93df41d46cca4be4b2aa09b9dcd 2024-12-17T12:39:52,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/fcff3fbc84e040d38ed0d7dd0707f1a8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fcff3fbc84e040d38ed0d7dd0707f1a8 2024-12-17T12:39:52,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fcff3fbc84e040d38ed0d7dd0707f1a8, entries=150, sequenceid=157, filesize=11.9 K 2024-12-17T12:39:52,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/1971ae45c0ed43469fa2295c48d9c13f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1971ae45c0ed43469fa2295c48d9c13f 2024-12-17T12:39:52,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1971ae45c0ed43469fa2295c48d9c13f, entries=150, sequenceid=157, filesize=11.9 K 2024-12-17T12:39:52,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/bfabe93df41d46cca4be4b2aa09b9dcd as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/bfabe93df41d46cca4be4b2aa09b9dcd 2024-12-17T12:39:52,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/bfabe93df41d46cca4be4b2aa09b9dcd, entries=150, sequenceid=157, filesize=11.9 K 2024-12-17T12:39:52,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 224d188997b94bb7d93c906d2c2bf845 in 1268ms, sequenceid=157, compaction requested=true 2024-12-17T12:39:52,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:52,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:39:52,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:52,689 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:52,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:39:52,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:52,689 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:52,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:39:52,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:52,690 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:52,690 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/B is initiating minor compaction (all files) 2024-12-17T12:39:52,690 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/B in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:52,690 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/6cb2d1a04d53475dbbad3311491bbd72, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f85bf8efec7941779918a15108160606, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1971ae45c0ed43469fa2295c48d9c13f] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=33.3 K 2024-12-17T12:39:52,690 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:52,690 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/A is initiating minor compaction (all files) 2024-12-17T12:39:52,690 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/A in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:52,690 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/222a84a8c61a4488ae779a341aca1b3f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/35b3a5b80fef445b91d002645010a799, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fcff3fbc84e040d38ed0d7dd0707f1a8] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=33.3 K 2024-12-17T12:39:52,691 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 6cb2d1a04d53475dbbad3311491bbd72, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734439188943 2024-12-17T12:39:52,691 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 222a84a8c61a4488ae779a341aca1b3f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734439188943 2024-12-17T12:39:52,691 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35b3a5b80fef445b91d002645010a799, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439189063 2024-12-17T12:39:52,691 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting f85bf8efec7941779918a15108160606, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439189063 2024-12-17T12:39:52,691 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting fcff3fbc84e040d38ed0d7dd0707f1a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734439191073 2024-12-17T12:39:52,692 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 1971ae45c0ed43469fa2295c48d9c13f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734439191073 2024-12-17T12:39:52,696 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#B#compaction#359 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:52,696 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/7c8d9acce68343d0b008e0e27935ea5d is 50, key is test_row_0/B:col10/1734439191109/Put/seqid=0 2024-12-17T12:39:52,697 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#A#compaction#360 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:52,697 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/2240b705696742ac9f6f54871d724dcb is 50, key is test_row_0/A:col10/1734439191109/Put/seqid=0 2024-12-17T12:39:52,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742256_1432 (size=12493) 2024-12-17T12:39:52,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742257_1433 (size=12493) 2024-12-17T12:39:52,703 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/2240b705696742ac9f6f54871d724dcb as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/2240b705696742ac9f6f54871d724dcb 2024-12-17T12:39:52,706 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/A of 224d188997b94bb7d93c906d2c2bf845 into 2240b705696742ac9f6f54871d724dcb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
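The entries above show MemStoreFlusher.0 queueing stores A, B and C for compaction and ExploringCompactionPolicy selecting all three eligible HFiles per store (consistent with the default hbase.hstore.compaction.min of 3). For reference, a hedged sketch of requesting an equivalent compaction on demand through the public Admin API; the table and column-family names are taken from the log, the connection setup is assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the region server to compact one column family of the test table.
                // The minor compactions in the log were triggered automatically after the flush;
                // this is the equivalent on-demand request.
                admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
            }
        }
    }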
2024-12-17T12:39:52,706 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:52,706 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/A, priority=13, startTime=1734439192689; duration=0sec 2024-12-17T12:39:52,706 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:52,706 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:A 2024-12-17T12:39:52,706 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:39:52,707 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:39:52,707 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/C is initiating minor compaction (all files) 2024-12-17T12:39:52,707 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/C in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:52,707 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/c8bfa1dfe76842e5bc7aad2f69bef2aa, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/af0c8ab25a9c4855aa509703cc206946, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/bfabe93df41d46cca4be4b2aa09b9dcd] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=33.3 K 2024-12-17T12:39:52,707 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8bfa1dfe76842e5bc7aad2f69bef2aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734439188943 2024-12-17T12:39:52,708 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting af0c8ab25a9c4855aa509703cc206946, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1734439189063 2024-12-17T12:39:52,708 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfabe93df41d46cca4be4b2aa09b9dcd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734439191073 2024-12-17T12:39:52,712 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#C#compaction#361 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:52,713 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/20f7a9846b13490bba02eace5354aae4 is 50, key is test_row_0/C:col10/1734439191109/Put/seqid=0 2024-12-17T12:39:52,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742258_1434 (size=12493) 2024-12-17T12:39:53,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-17T12:39:53,102 INFO [Thread-1819 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-17T12:39:53,103 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:53,104 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/7c8d9acce68343d0b008e0e27935ea5d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/7c8d9acce68343d0b008e0e27935ea5d 2024-12-17T12:39:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-17T12:39:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-17T12:39:53,106 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:53,107 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:53,107 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:53,107 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/B of 224d188997b94bb7d93c906d2c2bf845 into 7c8d9acce68343d0b008e0e27935ea5d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:53,107 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:53,107 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/B, priority=13, startTime=1734439192689; duration=0sec 2024-12-17T12:39:53,107 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:53,107 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:B 2024-12-17T12:39:53,119 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/20f7a9846b13490bba02eace5354aae4 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/20f7a9846b13490bba02eace5354aae4 2024-12-17T12:39:53,122 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/C of 224d188997b94bb7d93c906d2c2bf845 into 20f7a9846b13490bba02eace5354aae4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:53,122 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:53,122 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/C, priority=13, startTime=1734439192689; duration=0sec 2024-12-17T12:39:53,122 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:53,122 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:C 2024-12-17T12:39:53,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-17T12:39:53,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:53,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-17T12:39:53,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:53,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:53,217 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:53,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:53,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:53,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:53,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/fdf0f8b8ad8246eeaa94a2330c96ff89 is 50, key is test_row_0/A:col10/1734439191433/Put/seqid=0 2024-12-17T12:39:53,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742259_1435 (size=12151) 2024-12-17T12:39:53,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439253253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,258 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439253254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-17T12:39:53,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:53,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:53,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439253255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:53,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439253256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439253357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439253359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439253359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439253359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-17T12:39:53,410 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-17T12:39:53,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:53,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:53,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:53,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:53,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439253559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,562 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439253561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-17T12:39:53,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:53,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:53,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:53,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:53,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439253561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439253561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439253562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/fdf0f8b8ad8246eeaa94a2330c96ff89 2024-12-17T12:39:53,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/d784e3fadd2944f88e9b9ac70193c130 is 50, key is test_row_0/B:col10/1734439191433/Put/seqid=0 2024-12-17T12:39:53,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742260_1436 (size=12151) 2024-12-17T12:39:53,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-17T12:39:53,714 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-17T12:39:53,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:53,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:53,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:53,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,866 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-17T12:39:53,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:53,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:53,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:53,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:53,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439253864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439253866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439253866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:53,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:53,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439253867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:54,019 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:54,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-17T12:39:54,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:54,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:54,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:54,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:54,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:54,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:54,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/d784e3fadd2944f88e9b9ac70193c130 2024-12-17T12:39:54,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/34859c6cc9784157ae1fab3c38e239ea is 50, key is test_row_0/C:col10/1734439191433/Put/seqid=0 2024-12-17T12:39:54,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742261_1437 (size=12151) 2024-12-17T12:39:54,171 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:54,172 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-17T12:39:54,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:54,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:54,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:54,172 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:54,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:54,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:54,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-17T12:39:54,323 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:54,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-17T12:39:54,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:54,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:54,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:54,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:54,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:54,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:54,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:54,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439254370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:54,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:54,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439254371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:54,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:54,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439254372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:54,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:54,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439254374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:54,457 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/34859c6cc9784157ae1fab3c38e239ea 2024-12-17T12:39:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/fdf0f8b8ad8246eeaa94a2330c96ff89 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fdf0f8b8ad8246eeaa94a2330c96ff89 2024-12-17T12:39:54,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fdf0f8b8ad8246eeaa94a2330c96ff89, entries=150, sequenceid=171, filesize=11.9 K 2024-12-17T12:39:54,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/d784e3fadd2944f88e9b9ac70193c130 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d784e3fadd2944f88e9b9ac70193c130 2024-12-17T12:39:54,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d784e3fadd2944f88e9b9ac70193c130, entries=150, sequenceid=171, filesize=11.9 K 2024-12-17T12:39:54,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/34859c6cc9784157ae1fab3c38e239ea as 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/34859c6cc9784157ae1fab3c38e239ea 2024-12-17T12:39:54,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/34859c6cc9784157ae1fab3c38e239ea, entries=150, sequenceid=171, filesize=11.9 K 2024-12-17T12:39:54,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 224d188997b94bb7d93c906d2c2bf845 in 1255ms, sequenceid=171, compaction requested=false 2024-12-17T12:39:54,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:54,476 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:54,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-17T12:39:54,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:54,476 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-17T12:39:54,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:54,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:54,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:54,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:54,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:54,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:54,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/86ae051e1553470893e7b47c6a2cf0f1 is 50, key is test_row_0/A:col10/1734439193248/Put/seqid=0 2024-12-17T12:39:54,484 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742262_1438 (size=12151) 2024-12-17T12:39:54,885 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/86ae051e1553470893e7b47c6a2cf0f1 2024-12-17T12:39:54,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/4c0deb9657494b67baa4b03c4d0d8a7e is 50, key is test_row_0/B:col10/1734439193248/Put/seqid=0 2024-12-17T12:39:54,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742263_1439 (size=12151) 2024-12-17T12:39:55,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-17T12:39:55,293 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/4c0deb9657494b67baa4b03c4d0d8a7e 2024-12-17T12:39:55,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/27d1631cde274d5e9e03de90f6ae2df8 is 50, key is test_row_0/C:col10/1734439193248/Put/seqid=0 2024-12-17T12:39:55,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742264_1440 (size=12151) 2024-12-17T12:39:55,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:55,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:55,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439255385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439255389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439255389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439255389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439255490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439255490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439255494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439255494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439255575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,579 DEBUG [Thread-1811 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., hostname=681c08bfdbdf,36491,1734439058372, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:39:55,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439255693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439255694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439255698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:55,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439255699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:55,702 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/27d1631cde274d5e9e03de90f6ae2df8 2024-12-17T12:39:55,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/86ae051e1553470893e7b47c6a2cf0f1 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/86ae051e1553470893e7b47c6a2cf0f1 2024-12-17T12:39:55,709 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/86ae051e1553470893e7b47c6a2cf0f1, entries=150, sequenceid=196, filesize=11.9 K 2024-12-17T12:39:55,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/4c0deb9657494b67baa4b03c4d0d8a7e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/4c0deb9657494b67baa4b03c4d0d8a7e 2024-12-17T12:39:55,712 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/4c0deb9657494b67baa4b03c4d0d8a7e, entries=150, sequenceid=196, filesize=11.9 K 2024-12-17T12:39:55,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/27d1631cde274d5e9e03de90f6ae2df8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/27d1631cde274d5e9e03de90f6ae2df8 2024-12-17T12:39:55,716 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/27d1631cde274d5e9e03de90f6ae2df8, entries=150, sequenceid=196, filesize=11.9 K 2024-12-17T12:39:55,717 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 224d188997b94bb7d93c906d2c2bf845 in 1241ms, sequenceid=196, compaction requested=true 2024-12-17T12:39:55,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:55,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:55,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-17T12:39:55,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-17T12:39:55,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-17T12:39:55,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6110 sec 2024-12-17T12:39:55,720 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 2.6170 sec 2024-12-17T12:39:55,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:55,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-17T12:39:55,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:55,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:55,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:56,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:56,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:56,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:56,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/448419a71ec343de82258770c2401755 is 50, key is test_row_0/A:col10/1734439195998/Put/seqid=0 2024-12-17T12:39:56,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742265_1441 (size=16931) 2024-12-17T12:39:56,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439256029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439256030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439256031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439256033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439256137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439256137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439256137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439256138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439256341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439256342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439256343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439256343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/448419a71ec343de82258770c2401755 2024-12-17T12:39:56,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/ac950479a6f04a4b9759041425a872b2 is 50, key is test_row_0/B:col10/1734439195998/Put/seqid=0 2024-12-17T12:39:56,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742266_1442 (size=12151) 2024-12-17T12:39:56,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439256645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439256646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439256647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:56,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439256648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:56,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/ac950479a6f04a4b9759041425a872b2 2024-12-17T12:39:56,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/9e81199824a74431ab1a923ed3706f2f is 50, key is test_row_0/C:col10/1734439195998/Put/seqid=0 2024-12-17T12:39:56,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742267_1443 (size=12151) 2024-12-17T12:39:57,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:57,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439257149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:57,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:57,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439257151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:57,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:57,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439257154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:57,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:57,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439257155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:57,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-17T12:39:57,208 INFO [Thread-1819 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-17T12:39:57,209 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:57,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-17T12:39:57,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-17T12:39:57,211 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:57,211 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:57,211 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:39:57,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/9e81199824a74431ab1a923ed3706f2f 2024-12-17T12:39:57,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/448419a71ec343de82258770c2401755 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/448419a71ec343de82258770c2401755 2024-12-17T12:39:57,230 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/448419a71ec343de82258770c2401755, entries=250, sequenceid=210, filesize=16.5 K 2024-12-17T12:39:57,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/ac950479a6f04a4b9759041425a872b2 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/ac950479a6f04a4b9759041425a872b2 2024-12-17T12:39:57,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/ac950479a6f04a4b9759041425a872b2, entries=150, sequenceid=210, filesize=11.9 K 2024-12-17T12:39:57,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/9e81199824a74431ab1a923ed3706f2f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/9e81199824a74431ab1a923ed3706f2f 2024-12-17T12:39:57,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/9e81199824a74431ab1a923ed3706f2f, entries=150, sequenceid=210, filesize=11.9 K 2024-12-17T12:39:57,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 224d188997b94bb7d93c906d2c2bf845 in 1239ms, sequenceid=210, compaction requested=true 2024-12-17T12:39:57,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:57,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:39:57,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:57,237 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:39:57,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:39:57,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:57,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:C, priority=-2147483648, current under compaction 
store size is 3 2024-12-17T12:39:57,238 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:39:57,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:57,238 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53726 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:39:57,238 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:39:57,238 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/B is initiating minor compaction (all files) 2024-12-17T12:39:57,238 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/A is initiating minor compaction (all files) 2024-12-17T12:39:57,238 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/A in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:57,238 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/B in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:57,238 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/2240b705696742ac9f6f54871d724dcb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fdf0f8b8ad8246eeaa94a2330c96ff89, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/86ae051e1553470893e7b47c6a2cf0f1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/448419a71ec343de82258770c2401755] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=52.5 K 2024-12-17T12:39:57,238 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/7c8d9acce68343d0b008e0e27935ea5d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d784e3fadd2944f88e9b9ac70193c130, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/4c0deb9657494b67baa4b03c4d0d8a7e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/ac950479a6f04a4b9759041425a872b2] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=47.8 K 2024-12-17T12:39:57,239 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2240b705696742ac9f6f54871d724dcb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734439191073 2024-12-17T12:39:57,239 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c8d9acce68343d0b008e0e27935ea5d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734439191073 2024-12-17T12:39:57,239 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d784e3fadd2944f88e9b9ac70193c130, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734439191433 2024-12-17T12:39:57,239 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdf0f8b8ad8246eeaa94a2330c96ff89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734439191433 2024-12-17T12:39:57,239 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c0deb9657494b67baa4b03c4d0d8a7e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1734439193248 2024-12-17T12:39:57,239 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
86ae051e1553470893e7b47c6a2cf0f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1734439193248 2024-12-17T12:39:57,239 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting ac950479a6f04a4b9759041425a872b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439195388 2024-12-17T12:39:57,239 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 448419a71ec343de82258770c2401755, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439195388 2024-12-17T12:39:57,245 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#B#compaction#371 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:57,245 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/e3588a1f8c64487f84cd1d2255db9405 is 50, key is test_row_0/B:col10/1734439195998/Put/seqid=0 2024-12-17T12:39:57,247 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#A#compaction#372 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:57,247 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/1cba7ac349e740e3a102c673f68e11c3 is 50, key is test_row_0/A:col10/1734439195998/Put/seqid=0 2024-12-17T12:39:57,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742268_1444 (size=12629) 2024-12-17T12:39:57,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742269_1445 (size=12629) 2024-12-17T12:39:57,257 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/1cba7ac349e740e3a102c673f68e11c3 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/1cba7ac349e740e3a102c673f68e11c3 2024-12-17T12:39:57,259 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/A of 224d188997b94bb7d93c906d2c2bf845 into 1cba7ac349e740e3a102c673f68e11c3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:57,260 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:57,260 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/A, priority=12, startTime=1734439197237; duration=0sec 2024-12-17T12:39:57,260 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:39:57,260 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:A 2024-12-17T12:39:57,260 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:39:57,261 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:39:57,261 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/C is initiating minor compaction (all files) 2024-12-17T12:39:57,261 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/C in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:57,261 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/20f7a9846b13490bba02eace5354aae4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/34859c6cc9784157ae1fab3c38e239ea, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/27d1631cde274d5e9e03de90f6ae2df8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/9e81199824a74431ab1a923ed3706f2f] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=47.8 K 2024-12-17T12:39:57,261 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20f7a9846b13490bba02eace5354aae4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734439191073 2024-12-17T12:39:57,261 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34859c6cc9784157ae1fab3c38e239ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734439191433 2024-12-17T12:39:57,261 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27d1631cde274d5e9e03de90f6ae2df8, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1734439193248 2024-12-17T12:39:57,262 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e81199824a74431ab1a923ed3706f2f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439195388 2024-12-17T12:39:57,267 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#C#compaction#373 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:39:57,267 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/05342c7c12154fff85be7795c2335657 is 50, key is test_row_0/C:col10/1734439195998/Put/seqid=0 2024-12-17T12:39:57,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742270_1446 (size=12629) 2024-12-17T12:39:57,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-17T12:39:57,362 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:57,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-17T12:39:57,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:39:57,363 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-17T12:39:57,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:57,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:57,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:57,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:57,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:57,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:57,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/c6353bdaacb845ab869b856ef49b49d8 is 50, key is test_row_0/A:col10/1734439196015/Put/seqid=0 2024-12-17T12:39:57,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742271_1447 (size=12151) 2024-12-17T12:39:57,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-17T12:39:57,656 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/e3588a1f8c64487f84cd1d2255db9405 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/e3588a1f8c64487f84cd1d2255db9405 2024-12-17T12:39:57,659 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/B of 224d188997b94bb7d93c906d2c2bf845 into e3588a1f8c64487f84cd1d2255db9405(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:39:57,659 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:57,659 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/B, priority=12, startTime=1734439197237; duration=0sec 2024-12-17T12:39:57,659 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:57,659 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:B 2024-12-17T12:39:57,682 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/05342c7c12154fff85be7795c2335657 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/05342c7c12154fff85be7795c2335657 2024-12-17T12:39:57,686 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/C of 224d188997b94bb7d93c906d2c2bf845 into 05342c7c12154fff85be7795c2335657(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:39:57,686 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:57,686 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/C, priority=12, startTime=1734439197237; duration=0sec 2024-12-17T12:39:57,686 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:39:57,686 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:C 2024-12-17T12:39:57,771 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/c6353bdaacb845ab869b856ef49b49d8 2024-12-17T12:39:57,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/32ff6925b75946f2b269fe2cabe3706b is 50, key is test_row_0/B:col10/1734439196015/Put/seqid=0 2024-12-17T12:39:57,780 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742272_1448 (size=12151) 2024-12-17T12:39:57,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-17T12:39:58,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:58,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:58,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439258170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439258171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439258172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439258176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,180 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/32ff6925b75946f2b269fe2cabe3706b 2024-12-17T12:39:58,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/ed46e0a3d58d4100a066efc0e9cc38dd is 50, key is test_row_0/C:col10/1734439196015/Put/seqid=0 2024-12-17T12:39:58,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742273_1449 (size=12151) 2024-12-17T12:39:58,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439258278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439258278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439258278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439258280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-17T12:39:58,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439258480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439258481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439258481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439258485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,588 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/ed46e0a3d58d4100a066efc0e9cc38dd 2024-12-17T12:39:58,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/c6353bdaacb845ab869b856ef49b49d8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/c6353bdaacb845ab869b856ef49b49d8 2024-12-17T12:39:58,594 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/c6353bdaacb845ab869b856ef49b49d8, entries=150, sequenceid=233, filesize=11.9 K 2024-12-17T12:39:58,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/32ff6925b75946f2b269fe2cabe3706b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/32ff6925b75946f2b269fe2cabe3706b 2024-12-17T12:39:58,598 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/32ff6925b75946f2b269fe2cabe3706b, entries=150, sequenceid=233, filesize=11.9 K 2024-12-17T12:39:58,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 
{event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/ed46e0a3d58d4100a066efc0e9cc38dd as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/ed46e0a3d58d4100a066efc0e9cc38dd 2024-12-17T12:39:58,602 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/ed46e0a3d58d4100a066efc0e9cc38dd, entries=150, sequenceid=233, filesize=11.9 K 2024-12-17T12:39:58,602 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 224d188997b94bb7d93c906d2c2bf845 in 1240ms, sequenceid=233, compaction requested=false 2024-12-17T12:39:58,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:39:58,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:58,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-17T12:39:58,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-17T12:39:58,604 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-17T12:39:58,604 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3920 sec 2024-12-17T12:39:58,605 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.3950 sec 2024-12-17T12:39:58,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:39:58,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-17T12:39:58,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:39:58,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:58,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:39:58,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:58,789 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:39:58,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:39:58,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/15fee472e5c24d189b99d90e05b415ad is 50, key is test_row_0/A:col10/1734439198788/Put/seqid=0 2024-12-17T12:39:58,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742274_1450 (size=16931) 2024-12-17T12:39:58,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439258811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439258811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439258815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439258819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439258920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439258920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439258920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:58,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:58,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439258925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439259123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439259123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439259124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439259129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/15fee472e5c24d189b99d90e05b415ad 2024-12-17T12:39:59,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/0abf8d1f17d64213a5c0d6d84ba7efb9 is 50, key is test_row_0/B:col10/1734439198788/Put/seqid=0 2024-12-17T12:39:59,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742275_1451 (size=12151) 2024-12-17T12:39:59,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-17T12:39:59,314 INFO [Thread-1819 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-17T12:39:59,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:39:59,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-17T12:39:59,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-17T12:39:59,316 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:39:59,316 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:39:59,316 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-17T12:39:59,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-17T12:39:59,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439259426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439259428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439259428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439259434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,467 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-17T12:39:59,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:59,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:59,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:59,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:59,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:59,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:59,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/0abf8d1f17d64213a5c0d6d84ba7efb9 2024-12-17T12:39:59,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/d3719b80a0f64a70b9289d2f6e2a460d is 50, key is test_row_0/C:col10/1734439198788/Put/seqid=0 2024-12-17T12:39:59,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742276_1452 (size=12151) 2024-12-17T12:39:59,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33252 deadline: 1734439259612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,615 DEBUG [Thread-1811 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., hostname=681c08bfdbdf,36491,1734439058372, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:39:59,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-17T12:39:59,619 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,619 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-17T12:39:59,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 
{event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:59,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:59,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:59,620 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:59,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:39:59,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:59,771 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-17T12:39:59,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:59,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
as already flushing 2024-12-17T12:39:59,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:59,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:59,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-17T12:39:59,923 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-17T12:39:59,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:59,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:39:59,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:39:59,924 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:59,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:39:59,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439259933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439259934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439259934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:39:59,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:39:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439259937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:00,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/d3719b80a0f64a70b9289d2f6e2a460d 2024-12-17T12:40:00,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/15fee472e5c24d189b99d90e05b415ad as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/15fee472e5c24d189b99d90e05b415ad 2024-12-17T12:40:00,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/15fee472e5c24d189b99d90e05b415ad, entries=250, sequenceid=251, filesize=16.5 K 2024-12-17T12:40:00,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/0abf8d1f17d64213a5c0d6d84ba7efb9 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/0abf8d1f17d64213a5c0d6d84ba7efb9 2024-12-17T12:40:00,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/0abf8d1f17d64213a5c0d6d84ba7efb9, entries=150, sequenceid=251, filesize=11.9 K 2024-12-17T12:40:00,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/d3719b80a0f64a70b9289d2f6e2a460d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d3719b80a0f64a70b9289d2f6e2a460d 2024-12-17T12:40:00,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d3719b80a0f64a70b9289d2f6e2a460d, entries=150, sequenceid=251, filesize=11.9 K 2024-12-17T12:40:00,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 224d188997b94bb7d93c906d2c2bf845 in 1238ms, sequenceid=251, compaction requested=true 2024-12-17T12:40:00,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:00,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:40:00,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:00,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:40:00,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:00,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:40:00,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:00,025 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:00,025 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:00,026 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:00,026 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/B is initiating minor compaction (all files) 2024-12-17T12:40:00,026 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/B in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:40:00,026 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/e3588a1f8c64487f84cd1d2255db9405, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/32ff6925b75946f2b269fe2cabe3706b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/0abf8d1f17d64213a5c0d6d84ba7efb9] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=36.1 K 2024-12-17T12:40:00,026 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:00,026 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/A is initiating minor compaction (all files) 2024-12-17T12:40:00,026 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/A in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:00,026 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/1cba7ac349e740e3a102c673f68e11c3, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/c6353bdaacb845ab869b856ef49b49d8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/15fee472e5c24d189b99d90e05b415ad] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=40.7 K 2024-12-17T12:40:00,026 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cba7ac349e740e3a102c673f68e11c3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439195388 2024-12-17T12:40:00,027 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting e3588a1f8c64487f84cd1d2255db9405, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439195388 2024-12-17T12:40:00,027 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6353bdaacb845ab869b856ef49b49d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734439196015 2024-12-17T12:40:00,027 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 32ff6925b75946f2b269fe2cabe3706b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734439196015 2024-12-17T12:40:00,027 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 15fee472e5c24d189b99d90e05b415ad, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439198170 2024-12-17T12:40:00,027 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 0abf8d1f17d64213a5c0d6d84ba7efb9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439198175 2024-12-17T12:40:00,033 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#A#compaction#380 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:00,033 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/43de4482315744f2b9999105931f744d is 50, key is test_row_0/A:col10/1734439198788/Put/seqid=0 2024-12-17T12:40:00,034 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#B#compaction#381 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:00,035 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/672a56ccb71b44dbb5f84d8533845ee7 is 50, key is test_row_0/B:col10/1734439198788/Put/seqid=0 2024-12-17T12:40:00,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742277_1453 (size=12731) 2024-12-17T12:40:00,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742278_1454 (size=12731) 2024-12-17T12:40:00,041 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/672a56ccb71b44dbb5f84d8533845ee7 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/672a56ccb71b44dbb5f84d8533845ee7 2024-12-17T12:40:00,044 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/B of 224d188997b94bb7d93c906d2c2bf845 into 672a56ccb71b44dbb5f84d8533845ee7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:00,044 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:00,044 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/B, priority=13, startTime=1734439200025; duration=0sec 2024-12-17T12:40:00,044 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:00,044 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:B 2024-12-17T12:40:00,044 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:00,045 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:00,045 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/C is initiating minor compaction (all files) 2024-12-17T12:40:00,045 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/C in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:00,045 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/05342c7c12154fff85be7795c2335657, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/ed46e0a3d58d4100a066efc0e9cc38dd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d3719b80a0f64a70b9289d2f6e2a460d] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=36.1 K 2024-12-17T12:40:00,045 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 05342c7c12154fff85be7795c2335657, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734439195388 2024-12-17T12:40:00,046 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting ed46e0a3d58d4100a066efc0e9cc38dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734439196015 2024-12-17T12:40:00,046 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d3719b80a0f64a70b9289d2f6e2a460d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439198175 2024-12-17T12:40:00,049 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
224d188997b94bb7d93c906d2c2bf845#C#compaction#382 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:00,050 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/2e9803bb22dd41329ec4ff2223a86330 is 50, key is test_row_0/C:col10/1734439198788/Put/seqid=0 2024-12-17T12:40:00,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742279_1455 (size=12731) 2024-12-17T12:40:00,075 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:00,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-17T12:40:00,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:00,076 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-17T12:40:00,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:40:00,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:00,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:40:00,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:00,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:40:00,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:00,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/b723cccfdb644ffa9566959c7afe1cfb is 50, key is test_row_0/A:col10/1734439198810/Put/seqid=0 2024-12-17T12:40:00,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742280_1456 (size=12301) 2024-12-17T12:40:00,418 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-17T12:40:00,440 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/43de4482315744f2b9999105931f744d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/43de4482315744f2b9999105931f744d 2024-12-17T12:40:00,444 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/A of 224d188997b94bb7d93c906d2c2bf845 into 43de4482315744f2b9999105931f744d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:00,444 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:00,444 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/A, priority=13, startTime=1734439200025; duration=0sec 2024-12-17T12:40:00,444 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:00,444 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:A 2024-12-17T12:40:00,456 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/2e9803bb22dd41329ec4ff2223a86330 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/2e9803bb22dd41329ec4ff2223a86330 2024-12-17T12:40:00,461 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/C of 224d188997b94bb7d93c906d2c2bf845 into 2e9803bb22dd41329ec4ff2223a86330(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:00,461 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:00,461 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/C, priority=13, startTime=1734439200025; duration=0sec 2024-12-17T12:40:00,461 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:00,461 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:C 2024-12-17T12:40:00,485 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/b723cccfdb644ffa9566959c7afe1cfb 2024-12-17T12:40:00,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/a229a894f61d4e38aa011e9786147ae0 is 50, key is test_row_0/B:col10/1734439198810/Put/seqid=0 2024-12-17T12:40:00,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742281_1457 (size=12301) 2024-12-17T12:40:00,897 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/a229a894f61d4e38aa011e9786147ae0 2024-12-17T12:40:00,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/773fe73f47a14f82bfce015c948b22f6 is 50, key is test_row_0/C:col10/1734439198810/Put/seqid=0 2024-12-17T12:40:00,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742282_1458 (size=12301) 2024-12-17T12:40:00,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:40:00,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:40:00,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:00,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439260953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:00,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:00,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439260953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:00,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:00,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439260954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:00,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:00,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439260956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439261057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439261057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439261058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439261058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439261259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439261260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439261261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439261262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,305 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/773fe73f47a14f82bfce015c948b22f6 2024-12-17T12:40:01,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/b723cccfdb644ffa9566959c7afe1cfb as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/b723cccfdb644ffa9566959c7afe1cfb 2024-12-17T12:40:01,310 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/b723cccfdb644ffa9566959c7afe1cfb, entries=150, sequenceid=272, filesize=12.0 K 2024-12-17T12:40:01,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/a229a894f61d4e38aa011e9786147ae0 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/a229a894f61d4e38aa011e9786147ae0 2024-12-17T12:40:01,313 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/a229a894f61d4e38aa011e9786147ae0, entries=150, sequenceid=272, filesize=12.0 K 2024-12-17T12:40:01,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/773fe73f47a14f82bfce015c948b22f6 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/773fe73f47a14f82bfce015c948b22f6 2024-12-17T12:40:01,317 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/773fe73f47a14f82bfce015c948b22f6, entries=150, sequenceid=272, filesize=12.0 K 2024-12-17T12:40:01,317 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 224d188997b94bb7d93c906d2c2bf845 in 1241ms, sequenceid=272, compaction requested=false 2024-12-17T12:40:01,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:01,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:01,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-17T12:40:01,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-17T12:40:01,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-17T12:40:01,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0020 sec 2024-12-17T12:40:01,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 2.0050 sec 2024-12-17T12:40:01,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-17T12:40:01,419 INFO [Thread-1819 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-17T12:40:01,420 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:40:01,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-17T12:40:01,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-17T12:40:01,421 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:40:01,421 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:40:01,421 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:40:01,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-17T12:40:01,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:40:01,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-17T12:40:01,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:40:01,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:01,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:40:01,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:01,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:40:01,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:01,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/19bb793835914a7cac739a2ed5d6f360 is 50, key is test_row_0/A:col10/1734439200955/Put/seqid=0 2024-12-17T12:40:01,572 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-17T12:40:01,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:01,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
as already flushing 2024-12-17T12:40:01,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:01,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742283_1459 (size=14741) 2024-12-17T12:40:01,573 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:01,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:01,573 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/19bb793835914a7cac739a2ed5d6f360 2024-12-17T12:40:01,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:01,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/41e6e48266c44a6982539fe9d8110698 is 50, key is test_row_0/B:col10/1734439200955/Put/seqid=0 2024-12-17T12:40:01,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742284_1460 (size=12301) 2024-12-17T12:40:01,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439261585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439261585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439261586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439261586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439261690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439261690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439261691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439261691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-17T12:40:01,725 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-17T12:40:01,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:01,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:40:01,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:01,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:01,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:01,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:01,877 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-17T12:40:01,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:01,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:40:01,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:01,877 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:01,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:01,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:01,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439261894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439261895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439261895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439261895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:01,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/41e6e48266c44a6982539fe9d8110698 2024-12-17T12:40:01,988 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/004bdd3074884b989d8c70bdfec12d83 is 50, key is test_row_0/C:col10/1734439200955/Put/seqid=0 2024-12-17T12:40:01,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742285_1461 (size=12301) 2024-12-17T12:40:02,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-17T12:40:02,029 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-17T12:40:02,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:02,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:40:02,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:40:02,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:02,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:02,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:02,181 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,181 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-17T12:40:02,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:02,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:40:02,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:02,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:02,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:02,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:02,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439262198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439262199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439262199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439262199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,333 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-17T12:40:02,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:02,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:40:02,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:02,334 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:02,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:02,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:02,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/004bdd3074884b989d8c70bdfec12d83 2024-12-17T12:40:02,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/19bb793835914a7cac739a2ed5d6f360 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/19bb793835914a7cac739a2ed5d6f360 2024-12-17T12:40:02,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/19bb793835914a7cac739a2ed5d6f360, entries=200, sequenceid=291, filesize=14.4 K 2024-12-17T12:40:02,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/41e6e48266c44a6982539fe9d8110698 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/41e6e48266c44a6982539fe9d8110698 2024-12-17T12:40:02,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/41e6e48266c44a6982539fe9d8110698, entries=150, sequenceid=291, filesize=12.0 K 2024-12-17T12:40:02,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/004bdd3074884b989d8c70bdfec12d83 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/004bdd3074884b989d8c70bdfec12d83 2024-12-17T12:40:02,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/004bdd3074884b989d8c70bdfec12d83, entries=150, sequenceid=291, filesize=12.0 K 2024-12-17T12:40:02,403 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 224d188997b94bb7d93c906d2c2bf845 in 839ms, sequenceid=291, compaction requested=true 2024-12-17T12:40:02,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:02,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
224d188997b94bb7d93c906d2c2bf845:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:40:02,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:02,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:40:02,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:02,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:40:02,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:02,403 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:02,403 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:02,404 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:02,404 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:02,404 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/B is initiating minor compaction (all files) 2024-12-17T12:40:02,404 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/A is initiating minor compaction (all files) 2024-12-17T12:40:02,404 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/B in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:02,404 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/A in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:40:02,404 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/672a56ccb71b44dbb5f84d8533845ee7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/a229a894f61d4e38aa011e9786147ae0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/41e6e48266c44a6982539fe9d8110698] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=36.5 K 2024-12-17T12:40:02,404 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/43de4482315744f2b9999105931f744d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/b723cccfdb644ffa9566959c7afe1cfb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/19bb793835914a7cac739a2ed5d6f360] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=38.8 K 2024-12-17T12:40:02,404 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 672a56ccb71b44dbb5f84d8533845ee7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439198175 2024-12-17T12:40:02,404 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43de4482315744f2b9999105931f744d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439198175 2024-12-17T12:40:02,405 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting a229a894f61d4e38aa011e9786147ae0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1734439198810 2024-12-17T12:40:02,405 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting b723cccfdb644ffa9566959c7afe1cfb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1734439198810 2024-12-17T12:40:02,405 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 41e6e48266c44a6982539fe9d8110698, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734439200952 2024-12-17T12:40:02,405 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19bb793835914a7cac739a2ed5d6f360, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734439200952 2024-12-17T12:40:02,410 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#A#compaction#389 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:02,410 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#B#compaction#390 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:02,410 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/34bbac9d2bac4029bd8a64c8b215ecab is 50, key is test_row_0/B:col10/1734439200955/Put/seqid=0 2024-12-17T12:40:02,410 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/86145ea4d33d4770b186ba3198007aee is 50, key is test_row_0/A:col10/1734439200955/Put/seqid=0 2024-12-17T12:40:02,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742286_1462 (size=12983) 2024-12-17T12:40:02,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742287_1463 (size=12983) 2024-12-17T12:40:02,485 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-17T12:40:02,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
2024-12-17T12:40:02,486 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-17T12:40:02,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:40:02,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:02,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:40:02,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:02,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:40:02,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:02,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/56f0bb6d09644f99b612a3668138d7a3 is 50, key is test_row_0/A:col10/1734439201585/Put/seqid=0 2024-12-17T12:40:02,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742288_1464 (size=12301) 2024-12-17T12:40:02,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-17T12:40:02,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:40:02,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. as already flushing 2024-12-17T12:40:02,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439262720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439262720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439262721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439262721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,788 DEBUG [Thread-1820 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0341384e to 127.0.0.1:59557 2024-12-17T12:40:02,788 DEBUG [Thread-1822 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26b120d9 to 127.0.0.1:59557 2024-12-17T12:40:02,788 DEBUG [Thread-1820 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:02,788 DEBUG [Thread-1822 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:02,791 DEBUG [Thread-1824 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c1ec7ee to 127.0.0.1:59557 2024-12-17T12:40:02,791 DEBUG [Thread-1824 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:02,792 DEBUG [Thread-1826 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ccff4bf to 127.0.0.1:59557 2024-12-17T12:40:02,792 DEBUG [Thread-1826 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:02,793 DEBUG [Thread-1828 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x783a99f7 to 127.0.0.1:59557 2024-12-17T12:40:02,793 DEBUG [Thread-1828 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:02,821 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/34bbac9d2bac4029bd8a64c8b215ecab as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/34bbac9d2bac4029bd8a64c8b215ecab 2024-12-17T12:40:02,821 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/86145ea4d33d4770b186ba3198007aee as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/86145ea4d33d4770b186ba3198007aee 2024-12-17T12:40:02,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439262826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439262827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439262827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,827 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/A of 224d188997b94bb7d93c906d2c2bf845 into 86145ea4d33d4770b186ba3198007aee(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:02,827 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/B of 224d188997b94bb7d93c906d2c2bf845 into 34bbac9d2bac4029bd8a64c8b215ecab(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:02,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:02,827 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:02,827 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:02,827 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/A, priority=13, startTime=1734439202403; duration=0sec 2024-12-17T12:40:02,827 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/B, priority=13, startTime=1734439202403; duration=0sec 2024-12-17T12:40:02,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439262827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:02,827 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:02,827 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:A 2024-12-17T12:40:02,828 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:02,828 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:B 2024-12-17T12:40:02,828 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:02,829 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:02,829 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/C is 
initiating minor compaction (all files) 2024-12-17T12:40:02,829 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/C in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:02,829 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/2e9803bb22dd41329ec4ff2223a86330, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/773fe73f47a14f82bfce015c948b22f6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/004bdd3074884b989d8c70bdfec12d83] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=36.5 K 2024-12-17T12:40:02,829 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e9803bb22dd41329ec4ff2223a86330, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734439198175 2024-12-17T12:40:02,830 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 773fe73f47a14f82bfce015c948b22f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1734439198810 2024-12-17T12:40:02,830 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 004bdd3074884b989d8c70bdfec12d83, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734439200952 2024-12-17T12:40:02,835 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#C#compaction#392 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:02,835 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/91ff5b00cfbb422d9b20d093ac2bb313 is 50, key is test_row_0/C:col10/1734439200955/Put/seqid=0 2024-12-17T12:40:02,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742289_1465 (size=12983) 2024-12-17T12:40:02,893 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/56f0bb6d09644f99b612a3668138d7a3 2024-12-17T12:40:02,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/d29c12df7592442bab7c5b36f8e9d18a is 50, key is test_row_0/B:col10/1734439201585/Put/seqid=0 2024-12-17T12:40:02,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742290_1466 (size=12301) 2024-12-17T12:40:03,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:03,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:03,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:03,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439263029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:03,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439263029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:03,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439263029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:03,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:03,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439263030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:03,250 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/91ff5b00cfbb422d9b20d093ac2bb313 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/91ff5b00cfbb422d9b20d093ac2bb313 2024-12-17T12:40:03,256 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/C of 224d188997b94bb7d93c906d2c2bf845 into 91ff5b00cfbb422d9b20d093ac2bb313(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:03,256 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:03,256 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/C, priority=13, startTime=1734439202403; duration=0sec 2024-12-17T12:40:03,256 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:03,256 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:C 2024-12-17T12:40:03,309 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/d29c12df7592442bab7c5b36f8e9d18a 2024-12-17T12:40:03,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/c3deb0046ab844bda37e1abd1058f3a6 is 50, key is test_row_0/C:col10/1734439201585/Put/seqid=0 2024-12-17T12:40:03,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742291_1467 (size=12301) 2024-12-17T12:40:03,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:03,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33276 deadline: 1734439263332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:03,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:03,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33294 deadline: 1734439263333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:03,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:03,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33262 deadline: 1734439263334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:03,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:03,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33308 deadline: 1734439263335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:03,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-17T12:40:03,727 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/c3deb0046ab844bda37e1abd1058f3a6 2024-12-17T12:40:03,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/56f0bb6d09644f99b612a3668138d7a3 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/56f0bb6d09644f99b612a3668138d7a3 2024-12-17T12:40:03,742 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/56f0bb6d09644f99b612a3668138d7a3, entries=150, sequenceid=310, filesize=12.0 K 2024-12-17T12:40:03,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/d29c12df7592442bab7c5b36f8e9d18a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d29c12df7592442bab7c5b36f8e9d18a 2024-12-17T12:40:03,746 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d29c12df7592442bab7c5b36f8e9d18a, entries=150, sequenceid=310, filesize=12.0 K 2024-12-17T12:40:03,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/c3deb0046ab844bda37e1abd1058f3a6 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/c3deb0046ab844bda37e1abd1058f3a6 2024-12-17T12:40:03,750 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/c3deb0046ab844bda37e1abd1058f3a6, entries=150, sequenceid=310, filesize=12.0 K 2024-12-17T12:40:03,750 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 224d188997b94bb7d93c906d2c2bf845 in 1264ms, sequenceid=310, compaction requested=false 2024-12-17T12:40:03,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:03,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:03,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-17T12:40:03,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-17T12:40:03,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-17T12:40:03,753 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3300 sec 2024-12-17T12:40:03,753 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 2.3330 sec 2024-12-17T12:40:03,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:40:03,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-17T12:40:03,835 DEBUG [Thread-1815 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22e911df to 127.0.0.1:59557 2024-12-17T12:40:03,835 DEBUG [Thread-1815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:03,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:40:03,835 DEBUG [Thread-1817 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b727d6e to 127.0.0.1:59557 2024-12-17T12:40:03,835 DEBUG [Thread-1817 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:03,835 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:03,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:40:03,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:03,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:40:03,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:03,838 DEBUG [Thread-1813 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2885d2d9 to 127.0.0.1:59557 2024-12-17T12:40:03,838 DEBUG [Thread-1809 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e904d8 to 127.0.0.1:59557 2024-12-17T12:40:03,838 DEBUG [Thread-1809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:03,838 DEBUG [Thread-1813 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:03,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/8cbcd1f1202d4ddf93cf93600eb2b4c8 is 50, key is test_row_0/A:col10/1734439202720/Put/seqid=0 2024-12-17T12:40:03,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742292_1468 (size=12301) 2024-12-17T12:40:04,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/8cbcd1f1202d4ddf93cf93600eb2b4c8 2024-12-17T12:40:04,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/1849ae0f2bc4428190c95b97cf8afce2 is 50, key is test_row_0/B:col10/1734439202720/Put/seqid=0 2024-12-17T12:40:04,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742293_1469 (size=12301) 2024-12-17T12:40:04,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/1849ae0f2bc4428190c95b97cf8afce2 2024-12-17T12:40:04,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/eac04cf21a934607a527836a9f6a1c2d is 50, key is test_row_0/C:col10/1734439202720/Put/seqid=0 2024-12-17T12:40:04,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742294_1470 (size=12301) 2024-12-17T12:40:05,084 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/eac04cf21a934607a527836a9f6a1c2d 2024-12-17T12:40:05,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/8cbcd1f1202d4ddf93cf93600eb2b4c8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8cbcd1f1202d4ddf93cf93600eb2b4c8 2024-12-17T12:40:05,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8cbcd1f1202d4ddf93cf93600eb2b4c8, entries=150, sequenceid=333, filesize=12.0 K 2024-12-17T12:40:05,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/1849ae0f2bc4428190c95b97cf8afce2 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1849ae0f2bc4428190c95b97cf8afce2 2024-12-17T12:40:05,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1849ae0f2bc4428190c95b97cf8afce2, entries=150, sequenceid=333, filesize=12.0 K 2024-12-17T12:40:05,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/eac04cf21a934607a527836a9f6a1c2d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/eac04cf21a934607a527836a9f6a1c2d 2024-12-17T12:40:05,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/eac04cf21a934607a527836a9f6a1c2d, entries=150, sequenceid=333, filesize=12.0 K 2024-12-17T12:40:05,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=13.42 KB/13740 for 224d188997b94bb7d93c906d2c2bf845 in 1275ms, sequenceid=333, compaction requested=true 2024-12-17T12:40:05,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:05,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:40:05,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-12-17T12:40:05,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:40:05,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:05,109 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:05,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 224d188997b94bb7d93c906d2c2bf845:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:40:05,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:05,109 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:05,110 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:05,110 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:05,110 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/B is initiating minor compaction (all files) 2024-12-17T12:40:05,110 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/A is initiating minor compaction (all files) 2024-12-17T12:40:05,110 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/B in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:05,110 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/A in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
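
The selection above fires because each store has accumulated three eligible files, and the "16 blocking" figure is the store-file count at which further flushes would be held up. A small sketch of the two configuration knobs involved, assuming the standard HBase property names; the values shown are the defaults that match this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThresholds {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of store files before a minor compaction is considered
            // (the "3 eligible" in the log corresponds to the default of 3).
            conf.setInt("hbase.hstore.compactionThreshold", 3);
            // Store file count at which further memstore flushes are blocked
            // (the "16 blocking" in the log corresponds to the default of 16).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            System.out.println("compactionThreshold = "
                + conf.getInt("hbase.hstore.compactionThreshold", -1));
            System.out.println("blockingStoreFiles  = "
                + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
        }
    }
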
2024-12-17T12:40:05,110 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/34bbac9d2bac4029bd8a64c8b215ecab, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d29c12df7592442bab7c5b36f8e9d18a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1849ae0f2bc4428190c95b97cf8afce2] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=36.7 K 2024-12-17T12:40:05,110 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/86145ea4d33d4770b186ba3198007aee, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/56f0bb6d09644f99b612a3668138d7a3, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8cbcd1f1202d4ddf93cf93600eb2b4c8] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=36.7 K 2024-12-17T12:40:05,110 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 34bbac9d2bac4029bd8a64c8b215ecab, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734439200952 2024-12-17T12:40:05,111 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86145ea4d33d4770b186ba3198007aee, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734439200952 2024-12-17T12:40:05,111 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d29c12df7592442bab7c5b36f8e9d18a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734439201585 2024-12-17T12:40:05,111 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56f0bb6d09644f99b612a3668138d7a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734439201585 2024-12-17T12:40:05,111 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 1849ae0f2bc4428190c95b97cf8afce2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734439202720 2024-12-17T12:40:05,111 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8cbcd1f1202d4ddf93cf93600eb2b4c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734439202720 2024-12-17T12:40:05,121 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#B#compaction#399 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:05,121 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#A#compaction#398 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:05,122 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/46211c8345524cec8da4acbe43c1dbb6 is 50, key is test_row_0/A:col10/1734439202720/Put/seqid=0 2024-12-17T12:40:05,122 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/61538d3c2fee4c7183249c11e68c9561 is 50, key is test_row_0/B:col10/1734439202720/Put/seqid=0 2024-12-17T12:40:05,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742295_1471 (size=13085) 2024-12-17T12:40:05,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742296_1472 (size=13085) 2024-12-17T12:40:05,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-17T12:40:05,528 INFO [Thread-1819 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-17T12:40:05,537 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/46211c8345524cec8da4acbe43c1dbb6 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/46211c8345524cec8da4acbe43c1dbb6 2024-12-17T12:40:05,537 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/61538d3c2fee4c7183249c11e68c9561 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/61538d3c2fee4c7183249c11e68c9561 2024-12-17T12:40:05,541 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/B of 224d188997b94bb7d93c906d2c2bf845 into 61538d3c2fee4c7183249c11e68c9561(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:05,541 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/A of 224d188997b94bb7d93c906d2c2bf845 into 46211c8345524cec8da4acbe43c1dbb6(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:05,542 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:05,542 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:05,542 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/B, priority=13, startTime=1734439205109; duration=0sec 2024-12-17T12:40:05,542 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/A, priority=13, startTime=1734439205109; duration=0sec 2024-12-17T12:40:05,542 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:05,542 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:B 2024-12-17T12:40:05,542 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:05,542 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:05,542 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:A 2024-12-17T12:40:05,543 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:05,543 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 224d188997b94bb7d93c906d2c2bf845/C is initiating minor compaction (all files) 2024-12-17T12:40:05,543 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 224d188997b94bb7d93c906d2c2bf845/C in TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 
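
Outside of the automatic selection shown here, the same flush and compaction work can also be requested explicitly through the Admin API. A short sketch, assuming the standard HBase 2.x client; the table and family names are taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // Flush memstores to store files, as the FlushTableProcedure above does.
                admin.flush(table);
                // Ask for a compaction of a single column family ("C" here).
                admin.compact(table, Bytes.toBytes("C"));
                // Or rewrite every store file in the table in one pass.
                admin.majorCompact(table);
            }
        }
    }
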
2024-12-17T12:40:05,543 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/91ff5b00cfbb422d9b20d093ac2bb313, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/c3deb0046ab844bda37e1abd1058f3a6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/eac04cf21a934607a527836a9f6a1c2d] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp, totalSize=36.7 K 2024-12-17T12:40:05,543 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 91ff5b00cfbb422d9b20d093ac2bb313, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734439200952 2024-12-17T12:40:05,544 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting c3deb0046ab844bda37e1abd1058f3a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734439201585 2024-12-17T12:40:05,544 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting eac04cf21a934607a527836a9f6a1c2d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734439202720 2024-12-17T12:40:05,550 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 224d188997b94bb7d93c906d2c2bf845#C#compaction#400 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:05,551 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/24274853fb194a24a791f1ceff128198 is 50, key is test_row_0/C:col10/1734439202720/Put/seqid=0 2024-12-17T12:40:05,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742297_1473 (size=13085) 2024-12-17T12:40:05,965 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/24274853fb194a24a791f1ceff128198 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/24274853fb194a24a791f1ceff128198 2024-12-17T12:40:05,970 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 224d188997b94bb7d93c906d2c2bf845/C of 224d188997b94bb7d93c906d2c2bf845 into 24274853fb194a24a791f1ceff128198(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
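
The "total limit is 50.00 MB/second" figures above come from the pressure-aware compaction throughput controller, which scales the allowed compaction I/O between a lower and an upper bound as flush pressure rises. A sketch of those two bounds, assuming the property names used by that controller in HBase 2.x; the byte values are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputBounds {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Bounds (bytes/second) between which the controller scales; with low
            // flush pressure the effective limit sits at the lower bound, which is
            // what the "total limit is 50.00 MB/second" lines above reflect.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            System.out.println(conf.get("hbase.hstore.compaction.throughput.lower.bound"));
        }
    }
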
2024-12-17T12:40:05,970 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:05,970 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845., storeName=224d188997b94bb7d93c906d2c2bf845/C, priority=13, startTime=1734439205109; duration=0sec 2024-12-17T12:40:05,970 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:05,970 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 224d188997b94bb7d93c906d2c2bf845:C 2024-12-17T12:40:06,886 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-17T12:40:09,641 DEBUG [Thread-1811 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c5c4716 to 127.0.0.1:59557 2024-12-17T12:40:09,642 DEBUG [Thread-1811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:09,642 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-17T12:40:09,642 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-12-17T12:40:09,642 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-17T12:40:09,642 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3515 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10543 rows 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3496 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10487 rows 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3528 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10580 rows 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3529 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10586 rows 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3499 2024-12-17T12:40:09,643 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10497 rows 2024-12-17T12:40:09,643 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-17T12:40:09,644 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x190853fc to 127.0.0.1:59557 2024-12-17T12:40:09,644 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping 
rpc client 2024-12-17T12:40:09,649 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-17T12:40:09,649 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-17T12:40:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:09,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-17T12:40:09,652 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439209652"}]},"ts":"1734439209652"} 2024-12-17T12:40:09,653 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-17T12:40:09,699 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-17T12:40:09,700 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-17T12:40:09,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=224d188997b94bb7d93c906d2c2bf845, UNASSIGN}] 2024-12-17T12:40:09,703 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=224d188997b94bb7d93c906d2c2bf845, UNASSIGN 2024-12-17T12:40:09,704 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=224d188997b94bb7d93c906d2c2bf845, regionState=CLOSING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:09,705 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-17T12:40:09,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; CloseRegionProcedure 224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:40:09,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-17T12:40:09,858 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:09,859 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:40:09,859 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-17T12:40:09,859 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing 224d188997b94bb7d93c906d2c2bf845, disabling compactions & flushes 
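
From here the run tears down: the client's disableTable call is turned by the master into a DisableTableProcedure (pid=130), whose subprocedures unassign and close the region before the table state flips to DISABLED. A minimal sketch of the client side, assuming the standard Admin API; dropping the table afterwards is included only as the usual follow-up and is not part of this excerpt.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableAndDrop {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                if (admin.isTableEnabled(table)) {
                    // Drives the DisableTableProcedure seen above: the region is
                    // flushed, closed and unassigned before the state is updated.
                    admin.disableTable(table);
                }
                admin.deleteTable(table); // only legal once the table is disabled
            }
        }
    }
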
2024-12-17T12:40:09,860 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:09,860 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:09,860 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. after waiting 0 ms 2024-12-17T12:40:09,860 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:09,860 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(2837): Flushing 224d188997b94bb7d93c906d2c2bf845 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-17T12:40:09,860 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=A 2024-12-17T12:40:09,861 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:09,861 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=B 2024-12-17T12:40:09,861 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:09,861 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 224d188997b94bb7d93c906d2c2bf845, store=C 2024-12-17T12:40:09,861 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:09,867 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/6104a131ba4347b49e233e1fb9cb01df is 50, key is test_row_0/A:col10/1734439203837/Put/seqid=0 2024-12-17T12:40:09,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742298_1474 (size=12301) 2024-12-17T12:40:09,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-17T12:40:10,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-17T12:40:10,273 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/6104a131ba4347b49e233e1fb9cb01df 2024-12-17T12:40:10,285 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/d705b404816c4b3d9714dc3a0a191f94 is 50, key is test_row_0/B:col10/1734439203837/Put/seqid=0 2024-12-17T12:40:10,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742299_1475 (size=12301) 2024-12-17T12:40:10,691 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/d705b404816c4b3d9714dc3a0a191f94 2024-12-17T12:40:10,703 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/53e82fedf67846b4a33a603ce9cb006a is 50, key is test_row_0/C:col10/1734439203837/Put/seqid=0 2024-12-17T12:40:10,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742300_1476 (size=12301) 2024-12-17T12:40:10,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-17T12:40:11,108 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/53e82fedf67846b4a33a603ce9cb006a 2024-12-17T12:40:11,119 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/A/6104a131ba4347b49e233e1fb9cb01df as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/6104a131ba4347b49e233e1fb9cb01df 2024-12-17T12:40:11,125 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/6104a131ba4347b49e233e1fb9cb01df, entries=150, sequenceid=342, filesize=12.0 K 2024-12-17T12:40:11,126 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, 
pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/B/d705b404816c4b3d9714dc3a0a191f94 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d705b404816c4b3d9714dc3a0a191f94 2024-12-17T12:40:11,130 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d705b404816c4b3d9714dc3a0a191f94, entries=150, sequenceid=342, filesize=12.0 K 2024-12-17T12:40:11,131 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/.tmp/C/53e82fedf67846b4a33a603ce9cb006a as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/53e82fedf67846b4a33a603ce9cb006a 2024-12-17T12:40:11,135 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/53e82fedf67846b4a33a603ce9cb006a, entries=150, sequenceid=342, filesize=12.0 K 2024-12-17T12:40:11,136 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 224d188997b94bb7d93c906d2c2bf845 in 1276ms, sequenceid=342, compaction requested=false 2024-12-17T12:40:11,136 DEBUG [StoreCloser-TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/a87fa336362c4c549be8cdcd87a3a2d1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/33a062d3b0424d658c01bff6ba8a9418, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8d2fa36e38914f0ba9708d585fd7db79, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/ec832ffafd0f4618be3cde89daba1863, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/d0469768b89448faa13e43a15571d1cf, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/90e5e08f76e3457f8dd43ec6d45ce580, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/16496e7c2b3344189cd3a6a89575dca4, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/222a84a8c61a4488ae779a341aca1b3f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/35b3a5b80fef445b91d002645010a799, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/2240b705696742ac9f6f54871d724dcb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fcff3fbc84e040d38ed0d7dd0707f1a8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fdf0f8b8ad8246eeaa94a2330c96ff89, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/86ae051e1553470893e7b47c6a2cf0f1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/448419a71ec343de82258770c2401755, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/1cba7ac349e740e3a102c673f68e11c3, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/c6353bdaacb845ab869b856ef49b49d8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/15fee472e5c24d189b99d90e05b415ad, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/43de4482315744f2b9999105931f744d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/b723cccfdb644ffa9566959c7afe1cfb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/19bb793835914a7cac739a2ed5d6f360, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/86145ea4d33d4770b186ba3198007aee, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/56f0bb6d09644f99b612a3668138d7a3, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8cbcd1f1202d4ddf93cf93600eb2b4c8] to archive 2024-12-17T12:40:11,137 DEBUG [StoreCloser-TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
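
On close, the store files that earlier compactions made obsolete are not deleted in place: HFileArchiver moves them from the table's data directory into the parallel archive tree, where the master's cleaner chores remove them later. A small sketch that lists one archived family directory with the standard Hadoop FileSystem API; the path is copied from the log and everything else is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedHFiles {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Archive side of the tree that HFileArchiver moves compacted files into;
            // the root and region hash are taken from the log above.
            Path archived = new Path("hdfs://localhost:38223/user/jenkins/test-data/"
                + "4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/"
                + "TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A");
            try (FileSystem fs = archived.getFileSystem(conf)) {
                for (FileStatus f : fs.listStatus(archived)) {
                    // Print each archived HFile and its size in bytes.
                    System.out.println(f.getPath().getName() + " " + f.getLen() + " bytes");
                }
            }
        }
    }
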
2024-12-17T12:40:11,139 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/a87fa336362c4c549be8cdcd87a3a2d1 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/a87fa336362c4c549be8cdcd87a3a2d1 2024-12-17T12:40:11,139 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/33a062d3b0424d658c01bff6ba8a9418 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/33a062d3b0424d658c01bff6ba8a9418 2024-12-17T12:40:11,139 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/ec832ffafd0f4618be3cde89daba1863 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/ec832ffafd0f4618be3cde89daba1863 2024-12-17T12:40:11,139 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8d2fa36e38914f0ba9708d585fd7db79 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8d2fa36e38914f0ba9708d585fd7db79 2024-12-17T12:40:11,140 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/d0469768b89448faa13e43a15571d1cf to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/d0469768b89448faa13e43a15571d1cf 2024-12-17T12:40:11,140 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/35b3a5b80fef445b91d002645010a799 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/35b3a5b80fef445b91d002645010a799 2024-12-17T12:40:11,140 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/2240b705696742ac9f6f54871d724dcb to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/2240b705696742ac9f6f54871d724dcb 2024-12-17T12:40:11,141 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fcff3fbc84e040d38ed0d7dd0707f1a8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fcff3fbc84e040d38ed0d7dd0707f1a8 2024-12-17T12:40:11,141 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/222a84a8c61a4488ae779a341aca1b3f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/222a84a8c61a4488ae779a341aca1b3f 2024-12-17T12:40:11,141 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/16496e7c2b3344189cd3a6a89575dca4 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/16496e7c2b3344189cd3a6a89575dca4 2024-12-17T12:40:11,140 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/90e5e08f76e3457f8dd43ec6d45ce580 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/90e5e08f76e3457f8dd43ec6d45ce580 2024-12-17T12:40:11,141 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fdf0f8b8ad8246eeaa94a2330c96ff89 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/fdf0f8b8ad8246eeaa94a2330c96ff89 2024-12-17T12:40:11,142 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/448419a71ec343de82258770c2401755 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/448419a71ec343de82258770c2401755 2024-12-17T12:40:11,142 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/1cba7ac349e740e3a102c673f68e11c3 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/1cba7ac349e740e3a102c673f68e11c3 2024-12-17T12:40:11,142 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/86ae051e1553470893e7b47c6a2cf0f1 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/86ae051e1553470893e7b47c6a2cf0f1 2024-12-17T12:40:11,142 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/c6353bdaacb845ab869b856ef49b49d8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/c6353bdaacb845ab869b856ef49b49d8 2024-12-17T12:40:11,142 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/43de4482315744f2b9999105931f744d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/43de4482315744f2b9999105931f744d 2024-12-17T12:40:11,143 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/b723cccfdb644ffa9566959c7afe1cfb to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/b723cccfdb644ffa9566959c7afe1cfb 2024-12-17T12:40:11,143 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/19bb793835914a7cac739a2ed5d6f360 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/19bb793835914a7cac739a2ed5d6f360 2024-12-17T12:40:11,143 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/56f0bb6d09644f99b612a3668138d7a3 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/56f0bb6d09644f99b612a3668138d7a3 2024-12-17T12:40:11,143 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/86145ea4d33d4770b186ba3198007aee to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/86145ea4d33d4770b186ba3198007aee 2024-12-17T12:40:11,143 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8cbcd1f1202d4ddf93cf93600eb2b4c8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/8cbcd1f1202d4ddf93cf93600eb2b4c8 2024-12-17T12:40:11,143 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/15fee472e5c24d189b99d90e05b415ad to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/15fee472e5c24d189b99d90e05b415ad 2024-12-17T12:40:11,148 DEBUG [StoreCloser-TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/56006a5a86b349709635719d584e9db1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/e7a7c90af26b4c75a8bbc8a780367a4e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/276f2fc55a624f508c49cc328562b3a8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/55f9c369db4342baaa8e955e1b3f6823, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/7d0031d838514968a52058de8a040799, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1b680e3efd5d48b2a1cded5f9a3161a5, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/6cb2d1a04d53475dbbad3311491bbd72, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f46dbdaebd9441fa929d3f9046bb8ffe, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f85bf8efec7941779918a15108160606, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/7c8d9acce68343d0b008e0e27935ea5d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1971ae45c0ed43469fa2295c48d9c13f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d784e3fadd2944f88e9b9ac70193c130, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/4c0deb9657494b67baa4b03c4d0d8a7e, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/e3588a1f8c64487f84cd1d2255db9405, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/ac950479a6f04a4b9759041425a872b2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/32ff6925b75946f2b269fe2cabe3706b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/672a56ccb71b44dbb5f84d8533845ee7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/0abf8d1f17d64213a5c0d6d84ba7efb9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/a229a894f61d4e38aa011e9786147ae0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/34bbac9d2bac4029bd8a64c8b215ecab, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/41e6e48266c44a6982539fe9d8110698, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d29c12df7592442bab7c5b36f8e9d18a, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1849ae0f2bc4428190c95b97cf8afce2] to archive 2024-12-17T12:40:11,148 DEBUG [StoreCloser-TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-17T12:40:11,150 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/7d0031d838514968a52058de8a040799 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/7d0031d838514968a52058de8a040799 2024-12-17T12:40:11,150 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/e7a7c90af26b4c75a8bbc8a780367a4e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/e7a7c90af26b4c75a8bbc8a780367a4e 2024-12-17T12:40:11,150 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1b680e3efd5d48b2a1cded5f9a3161a5 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1b680e3efd5d48b2a1cded5f9a3161a5 2024-12-17T12:40:11,150 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f46dbdaebd9441fa929d3f9046bb8ffe to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f46dbdaebd9441fa929d3f9046bb8ffe 2024-12-17T12:40:11,150 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/55f9c369db4342baaa8e955e1b3f6823 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/55f9c369db4342baaa8e955e1b3f6823 2024-12-17T12:40:11,150 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/56006a5a86b349709635719d584e9db1 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/56006a5a86b349709635719d584e9db1 2024-12-17T12:40:11,150 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/6cb2d1a04d53475dbbad3311491bbd72 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/6cb2d1a04d53475dbbad3311491bbd72 2024-12-17T12:40:11,150 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/276f2fc55a624f508c49cc328562b3a8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/276f2fc55a624f508c49cc328562b3a8 2024-12-17T12:40:11,151 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1971ae45c0ed43469fa2295c48d9c13f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1971ae45c0ed43469fa2295c48d9c13f 2024-12-17T12:40:11,151 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/4c0deb9657494b67baa4b03c4d0d8a7e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/4c0deb9657494b67baa4b03c4d0d8a7e 2024-12-17T12:40:11,151 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/7c8d9acce68343d0b008e0e27935ea5d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/7c8d9acce68343d0b008e0e27935ea5d 2024-12-17T12:40:11,151 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d784e3fadd2944f88e9b9ac70193c130 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d784e3fadd2944f88e9b9ac70193c130 2024-12-17T12:40:11,152 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/e3588a1f8c64487f84cd1d2255db9405 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/e3588a1f8c64487f84cd1d2255db9405 2024-12-17T12:40:11,152 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/ac950479a6f04a4b9759041425a872b2 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/ac950479a6f04a4b9759041425a872b2 2024-12-17T12:40:11,152 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/32ff6925b75946f2b269fe2cabe3706b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/32ff6925b75946f2b269fe2cabe3706b 2024-12-17T12:40:11,152 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/672a56ccb71b44dbb5f84d8533845ee7 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/672a56ccb71b44dbb5f84d8533845ee7 2024-12-17T12:40:11,152 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/34bbac9d2bac4029bd8a64c8b215ecab to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/34bbac9d2bac4029bd8a64c8b215ecab 2024-12-17T12:40:11,152 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/0abf8d1f17d64213a5c0d6d84ba7efb9 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/0abf8d1f17d64213a5c0d6d84ba7efb9 2024-12-17T12:40:11,152 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/a229a894f61d4e38aa011e9786147ae0 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/a229a894f61d4e38aa011e9786147ae0 2024-12-17T12:40:11,152 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f85bf8efec7941779918a15108160606 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/f85bf8efec7941779918a15108160606 2024-12-17T12:40:11,153 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/41e6e48266c44a6982539fe9d8110698 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/41e6e48266c44a6982539fe9d8110698 2024-12-17T12:40:11,153 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d29c12df7592442bab7c5b36f8e9d18a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d29c12df7592442bab7c5b36f8e9d18a 2024-12-17T12:40:11,153 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1849ae0f2bc4428190c95b97cf8afce2 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/1849ae0f2bc4428190c95b97cf8afce2 2024-12-17T12:40:11,154 DEBUG [StoreCloser-TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/3254b4380bbf4091abdc87176505f2e2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/89945f84eb2f4131a72ec63e3cd21c28, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/24a20b0a1975479d84c2ad9cf7ac9639, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d290113bf1cf4f79956963b7cb9da415, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/b8281b1e25cf49daacbf5b41dcda10fb, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/4f9e4c104deb4ed6b96b668bd4a2823b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/c8bfa1dfe76842e5bc7aad2f69bef2aa, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/617c5de5fe0d4da1a7f08818e4ce99b8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/af0c8ab25a9c4855aa509703cc206946, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/20f7a9846b13490bba02eace5354aae4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/bfabe93df41d46cca4be4b2aa09b9dcd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/34859c6cc9784157ae1fab3c38e239ea, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/27d1631cde274d5e9e03de90f6ae2df8, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/05342c7c12154fff85be7795c2335657, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/9e81199824a74431ab1a923ed3706f2f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/ed46e0a3d58d4100a066efc0e9cc38dd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/2e9803bb22dd41329ec4ff2223a86330, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d3719b80a0f64a70b9289d2f6e2a460d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/773fe73f47a14f82bfce015c948b22f6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/91ff5b00cfbb422d9b20d093ac2bb313, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/004bdd3074884b989d8c70bdfec12d83, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/c3deb0046ab844bda37e1abd1058f3a6, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/eac04cf21a934607a527836a9f6a1c2d] to archive 2024-12-17T12:40:11,154 DEBUG [StoreCloser-TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-17T12:40:11,155 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/3254b4380bbf4091abdc87176505f2e2 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/3254b4380bbf4091abdc87176505f2e2 2024-12-17T12:40:11,155 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/89945f84eb2f4131a72ec63e3cd21c28 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/89945f84eb2f4131a72ec63e3cd21c28 2024-12-17T12:40:11,155 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/24a20b0a1975479d84c2ad9cf7ac9639 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/24a20b0a1975479d84c2ad9cf7ac9639 2024-12-17T12:40:11,155 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/b8281b1e25cf49daacbf5b41dcda10fb to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/b8281b1e25cf49daacbf5b41dcda10fb 2024-12-17T12:40:11,156 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d290113bf1cf4f79956963b7cb9da415 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d290113bf1cf4f79956963b7cb9da415 2024-12-17T12:40:11,156 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/4f9e4c104deb4ed6b96b668bd4a2823b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/4f9e4c104deb4ed6b96b668bd4a2823b 2024-12-17T12:40:11,156 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/c8bfa1dfe76842e5bc7aad2f69bef2aa to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/c8bfa1dfe76842e5bc7aad2f69bef2aa 2024-12-17T12:40:11,156 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/617c5de5fe0d4da1a7f08818e4ce99b8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/617c5de5fe0d4da1a7f08818e4ce99b8 2024-12-17T12:40:11,156 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/af0c8ab25a9c4855aa509703cc206946 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/af0c8ab25a9c4855aa509703cc206946 2024-12-17T12:40:11,156 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/20f7a9846b13490bba02eace5354aae4 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/20f7a9846b13490bba02eace5354aae4 2024-12-17T12:40:11,157 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/34859c6cc9784157ae1fab3c38e239ea to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/34859c6cc9784157ae1fab3c38e239ea 2024-12-17T12:40:11,157 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/bfabe93df41d46cca4be4b2aa09b9dcd to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/bfabe93df41d46cca4be4b2aa09b9dcd 2024-12-17T12:40:11,157 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/05342c7c12154fff85be7795c2335657 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/05342c7c12154fff85be7795c2335657 2024-12-17T12:40:11,157 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/27d1631cde274d5e9e03de90f6ae2df8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/27d1631cde274d5e9e03de90f6ae2df8 2024-12-17T12:40:11,157 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/9e81199824a74431ab1a923ed3706f2f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/9e81199824a74431ab1a923ed3706f2f 2024-12-17T12:40:11,157 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/ed46e0a3d58d4100a066efc0e9cc38dd to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/ed46e0a3d58d4100a066efc0e9cc38dd 2024-12-17T12:40:11,158 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/773fe73f47a14f82bfce015c948b22f6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/773fe73f47a14f82bfce015c948b22f6 2024-12-17T12:40:11,158 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/2e9803bb22dd41329ec4ff2223a86330 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/2e9803bb22dd41329ec4ff2223a86330 2024-12-17T12:40:11,158 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d3719b80a0f64a70b9289d2f6e2a460d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/d3719b80a0f64a70b9289d2f6e2a460d 2024-12-17T12:40:11,158 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/004bdd3074884b989d8c70bdfec12d83 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/004bdd3074884b989d8c70bdfec12d83 2024-12-17T12:40:11,158 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/91ff5b00cfbb422d9b20d093ac2bb313 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/91ff5b00cfbb422d9b20d093ac2bb313 2024-12-17T12:40:11,158 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/c3deb0046ab844bda37e1abd1058f3a6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/c3deb0046ab844bda37e1abd1058f3a6 2024-12-17T12:40:11,158 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/eac04cf21a934607a527836a9f6a1c2d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/eac04cf21a934607a527836a9f6a1c2d 2024-12-17T12:40:11,161 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/recovered.edits/345.seqid, newMaxSeqId=345, maxSeqId=1 2024-12-17T12:40:11,161 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845. 2024-12-17T12:40:11,161 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for 224d188997b94bb7d93c906d2c2bf845: 2024-12-17T12:40:11,162 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed 224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:40:11,162 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=224d188997b94bb7d93c906d2c2bf845, regionState=CLOSED 2024-12-17T12:40:11,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-17T12:40:11,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; CloseRegionProcedure 224d188997b94bb7d93c906d2c2bf845, server=681c08bfdbdf,36491,1734439058372 in 1.4580 sec 2024-12-17T12:40:11,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=132, resume processing ppid=131 2024-12-17T12:40:11,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=131, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=224d188997b94bb7d93c906d2c2bf845, UNASSIGN in 1.4620 sec 2024-12-17T12:40:11,166 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-17T12:40:11,166 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4650 sec 2024-12-17T12:40:11,167 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439211166"}]},"ts":"1734439211166"} 2024-12-17T12:40:11,167 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-17T12:40:11,207 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set 
TestAcidGuarantees to state=DISABLED 2024-12-17T12:40:11,209 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5590 sec 2024-12-17T12:40:11,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-17T12:40:11,761 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-17T12:40:11,762 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-17T12:40:11,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:11,766 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=134, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:11,768 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=134, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:11,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-17T12:40:11,770 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:40:11,774 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/recovered.edits] 2024-12-17T12:40:11,778 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/46211c8345524cec8da4acbe43c1dbb6 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/46211c8345524cec8da4acbe43c1dbb6 2024-12-17T12:40:11,778 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/6104a131ba4347b49e233e1fb9cb01df to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/A/6104a131ba4347b49e233e1fb9cb01df 
2024-12-17T12:40:11,782 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/61538d3c2fee4c7183249c11e68c9561 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/61538d3c2fee4c7183249c11e68c9561 2024-12-17T12:40:11,782 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d705b404816c4b3d9714dc3a0a191f94 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/B/d705b404816c4b3d9714dc3a0a191f94 2024-12-17T12:40:11,786 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/24274853fb194a24a791f1ceff128198 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/24274853fb194a24a791f1ceff128198 2024-12-17T12:40:11,786 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/53e82fedf67846b4a33a603ce9cb006a to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/C/53e82fedf67846b4a33a603ce9cb006a 2024-12-17T12:40:11,790 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/recovered.edits/345.seqid to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845/recovered.edits/345.seqid 2024-12-17T12:40:11,791 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/224d188997b94bb7d93c906d2c2bf845 2024-12-17T12:40:11,791 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-17T12:40:11,794 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=134, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:11,796 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-17T12:40:11,798 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-17T12:40:11,800 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=134, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:11,800 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-17T12:40:11,800 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734439211800"}]},"ts":"9223372036854775807"} 2024-12-17T12:40:11,803 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-17T12:40:11,803 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 224d188997b94bb7d93c906d2c2bf845, NAME => 'TestAcidGuarantees,,1734439180528.224d188997b94bb7d93c906d2c2bf845.', STARTKEY => '', ENDKEY => ''}] 2024-12-17T12:40:11,803 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-17T12:40:11,803 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734439211803"}]},"ts":"9223372036854775807"} 2024-12-17T12:40:11,805 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-17T12:40:11,841 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=134, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:11,842 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 79 msec 2024-12-17T12:40:11,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-17T12:40:11,869 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-17T12:40:11,877 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244 (was 244), OpenFileDescriptor=454 (was 449) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=329 (was 311) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3707 (was 3716) 2024-12-17T12:40:11,884 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=244, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=329, ProcessCount=11, AvailableMemoryMB=3707 2024-12-17T12:40:11,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
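
The pid=130 DisableTableProcedure and pid=134 DeleteTableProcedure recorded above are the server-side halves of a two-step client call sequence: disable the table, then delete it. A minimal sketch of that sequence against the HBase 2.x Java Admin API follows; the class name and connection setup are illustrative assumptions, not part of the TestAcidGuarantees harness.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical helper: drop the TestAcidGuarantees table the way the test teardown does.
public class DropAcidTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        admin.disableTable(table);  // drives the DisableTableProcedure (unassign regions, state=DISABLED)
        admin.deleteTable(table);   // drives the DeleteTableProcedure (archive region dirs, clean hbase:meta)
      }
    }
  }
}
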
2024-12-17T12:40:11,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-17T12:40:11,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:11,887 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-17T12:40:11,888 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:11,888 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 135 2024-12-17T12:40:11,888 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-17T12:40:11,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-17T12:40:11,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742301_1477 (size=963) 2024-12-17T12:40:11,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-17T12:40:12,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-17T12:40:12,298 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9 2024-12-17T12:40:12,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742302_1478 (size=53) 2024-12-17T12:40:12,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-17T12:40:12,709 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:40:12,709 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 05f19a912451e6e90726d599fdf98d6d, disabling compactions & flushes 2024-12-17T12:40:12,709 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:12,710 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:12,710 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. after waiting 0 ms 2024-12-17T12:40:12,710 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:12,710 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
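
The create request logged above carries the full table descriptor: three column families A, B and C with VERSIONS => '1' and BLOCKSIZE => '65536', plus the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' that selects the adaptive in-memory compaction policy under test. A minimal sketch of how such a descriptor could be built and submitted through the HBase 2.x Java Admin API is shown below; the class name and connection setup are illustrative assumptions rather than the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper: create a TestAcidGuarantees-style table with an ADAPTIVE compacting memstore.
public class CreateAcidTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // table-level attribute selecting the adaptive in-memory compaction policy
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)    // VERSIONS => '1'
                .setBlocksize(65536)  // BLOCKSIZE => '65536'
                .build());
      }
      admin.createTable(table.build());  // stores a CreateTableProcedure on the master, as seen above
    }
  }
}
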
2024-12-17T12:40:12,710 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:12,712 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-17T12:40:12,713 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734439212712"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734439212712"}]},"ts":"1734439212712"} 2024-12-17T12:40:12,715 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-17T12:40:12,716 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-17T12:40:12,717 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439212716"}]},"ts":"1734439212716"} 2024-12-17T12:40:12,718 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-17T12:40:12,766 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=05f19a912451e6e90726d599fdf98d6d, ASSIGN}] 2024-12-17T12:40:12,769 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=05f19a912451e6e90726d599fdf98d6d, ASSIGN 2024-12-17T12:40:12,770 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=05f19a912451e6e90726d599fdf98d6d, ASSIGN; state=OFFLINE, location=681c08bfdbdf,36491,1734439058372; forceNewPlan=false, retain=false 2024-12-17T12:40:12,921 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=136 updating hbase:meta row=05f19a912451e6e90726d599fdf98d6d, regionState=OPENING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:12,923 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; OpenRegionProcedure 05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:40:12,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-17T12:40:13,077 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:13,084 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
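Note: the create request logged above builds a three-column-family table ('A', 'B', 'C'), each with VERSIONS => '1', plus the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'; the client then polls MasterRpcServices until CreateTableProcedure pid=135 finishes. The following is only a minimal sketch of how such a table could be created through the standard HBase 2.x Admin API — class and variable names are illustrative, not taken from the test itself.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // table-level metadata seen in the create request above
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)   // VERSIONS => '1'
            .build());
      }
      // createTable blocks until the CreateTableProcedure completes, which is what the
      // repeated "Checking to see if procedure is done pid=135" lines in this log reflect.
      admin.createTable(table.build());
    }
  }
}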
2024-12-17T12:40:13,084 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(7285): Opening region: {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:40:13,084 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:13,084 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:40:13,084 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(7327): checking encryption for 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:13,084 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(7330): checking classloading for 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:13,086 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:13,088 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:40:13,088 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05f19a912451e6e90726d599fdf98d6d columnFamilyName A 2024-12-17T12:40:13,088 DEBUG [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:13,089 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(327): Store=05f19a912451e6e90726d599fdf98d6d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:40:13,089 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:13,090 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:40:13,090 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05f19a912451e6e90726d599fdf98d6d columnFamilyName B 2024-12-17T12:40:13,090 DEBUG [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:13,091 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(327): Store=05f19a912451e6e90726d599fdf98d6d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:40:13,091 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:13,092 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:40:13,093 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05f19a912451e6e90726d599fdf98d6d columnFamilyName C 2024-12-17T12:40:13,093 DEBUG [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:13,093 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(327): Store=05f19a912451e6e90726d599fdf98d6d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:40:13,093 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:13,094 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:13,095 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:13,097 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-17T12:40:13,098 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1085): writing seq id for 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:13,101 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-17T12:40:13,102 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1102): Opened 05f19a912451e6e90726d599fdf98d6d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61603491, jitterRate=-0.08203645050525665}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-17T12:40:13,103 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1001): Region open journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:13,104 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., pid=137, masterSystemTime=1734439213077 2024-12-17T12:40:13,106 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:13,106 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
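Note: the store-open lines above show every family backed by a CompactingMemStore (compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10), which is driven here by the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. The same policy can also be chosen per column family through the descriptor; the snippet below is a hedged sketch using the standard 2.x builder API (the class and method name adaptiveFamily are illustrative only).

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveMemstoreSketch {
  // Per-family equivalent of the table-wide 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'.
  public static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}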
2024-12-17T12:40:13,106 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=136 updating hbase:meta row=05f19a912451e6e90726d599fdf98d6d, regionState=OPEN, openSeqNum=2, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:13,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-17T12:40:13,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; OpenRegionProcedure 05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 in 184 msec 2024-12-17T12:40:13,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=136, resume processing ppid=135 2024-12-17T12:40:13,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, ppid=135, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=05f19a912451e6e90726d599fdf98d6d, ASSIGN in 343 msec 2024-12-17T12:40:13,111 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-17T12:40:13,111 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439213111"}]},"ts":"1734439213111"} 2024-12-17T12:40:13,112 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-17T12:40:13,133 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-17T12:40:13,135 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2470 sec 2024-12-17T12:40:13,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-17T12:40:13,999 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 135 completed 2024-12-17T12:40:14,001 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34cb3991 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e55eb7 2024-12-17T12:40:14,069 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfb20f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:40:14,072 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:40:14,074 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43888, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:40:14,076 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-17T12:40:14,078 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55974, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-17T12:40:14,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-17T12:40:14,081 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-17T12:40:14,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:14,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742303_1479 (size=999) 2024-12-17T12:40:14,494 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-17T12:40:14,494 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-17T12:40:14,499 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-17T12:40:14,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=05f19a912451e6e90726d599fdf98d6d, REOPEN/MOVE}] 2024-12-17T12:40:14,502 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=140, ppid=139, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=05f19a912451e6e90726d599fdf98d6d, REOPEN/MOVE 2024-12-17T12:40:14,502 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=140 updating hbase:meta row=05f19a912451e6e90726d599fdf98d6d, regionState=CLOSING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:14,503 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-17T12:40:14,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; CloseRegionProcedure 05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:40:14,655 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:14,655 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] handler.UnassignRegionHandler(124): Close 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:14,656 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-17T12:40:14,656 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1681): Closing 05f19a912451e6e90726d599fdf98d6d, disabling compactions & flushes 2024-12-17T12:40:14,656 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:14,656 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:14,656 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. after waiting 0 ms 2024-12-17T12:40:14,656 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
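Note: the modify request above (pid=138) turns family 'A' into a MOB family — IS_MOB => 'true' with MOB_THRESHOLD => '4', so cell values larger than 4 bytes will be written to MOB files rather than ordinary store files — and the ReopenTableRegionsProcedure that follows bounces the region so the new descriptor takes effect. A rough sketch of performing the same change through the 2.x Admin API is shown below; the helper name enableMobOnA is hypothetical and not the test's own code.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobSketch {
  // Hypothetical helper: turn family "A" of an existing table into a MOB family.
  public static void enableMobOnA(Admin admin, TableName table) throws IOException {
    TableDescriptor current = admin.getDescriptor(table);
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
            .setMobEnabled(true)     // IS_MOB => 'true'
            .setMobThreshold(4L)     // MOB_THRESHOLD => '4' (bytes)
            .build())
        .build();
    // modifyTable drives a ModifyTableProcedure like pid=138 above, including the region reopen.
    admin.modifyTable(modified);
  }
}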
2024-12-17T12:40:14,663 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-17T12:40:14,664 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:14,664 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1635): Region close journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:14,665 WARN [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegionServer(3786): Not adding moved region record: 05f19a912451e6e90726d599fdf98d6d to self. 2024-12-17T12:40:14,667 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] handler.UnassignRegionHandler(170): Closed 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:14,667 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=140 updating hbase:meta row=05f19a912451e6e90726d599fdf98d6d, regionState=CLOSED 2024-12-17T12:40:14,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-17T12:40:14,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; CloseRegionProcedure 05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 in 166 msec 2024-12-17T12:40:14,671 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=140, ppid=139, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=05f19a912451e6e90726d599fdf98d6d, REOPEN/MOVE; state=CLOSED, location=681c08bfdbdf,36491,1734439058372; forceNewPlan=false, retain=true 2024-12-17T12:40:14,822 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=140 updating hbase:meta row=05f19a912451e6e90726d599fdf98d6d, regionState=OPENING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:14,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=140, state=RUNNABLE; OpenRegionProcedure 05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:40:14,977 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:14,981 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
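Note: the REOPEN/MOVE transition above closes the region with evictCache=false and reassigns it with retain=true, so it comes back on the same RegionServer (681c08bfdbdf,36491) with a higher open sequence id (openSeqNum moves from 2 to 5 a few lines below). If you wanted to observe that from a client, the region locator exposes both the server name and the open sequence number; this is only a hedged sketch with illustrative names.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationSketch {
  // Hypothetical check: print where the single region of the table lives and its open sequence id.
  public static void printLocation(Connection conn, TableName table) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      // reload=true forces a fresh lookup in hbase:meta instead of the cached location
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
      System.out.println(loc.getServerName() + " openSeqNum=" + loc.getSeqNum());
    }
  }
}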
2024-12-17T12:40:14,981 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegion(7285): Opening region: {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} 2024-12-17T12:40:14,982 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:14,982 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-17T12:40:14,982 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegion(7327): checking encryption for 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:14,982 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegion(7330): checking classloading for 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:14,984 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:14,985 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:40:14,986 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05f19a912451e6e90726d599fdf98d6d columnFamilyName A 2024-12-17T12:40:14,988 DEBUG [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:14,988 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(327): Store=05f19a912451e6e90726d599fdf98d6d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:40:14,989 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:14,990 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:40:14,990 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05f19a912451e6e90726d599fdf98d6d columnFamilyName B 2024-12-17T12:40:14,990 DEBUG [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:14,991 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(327): Store=05f19a912451e6e90726d599fdf98d6d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:40:14,991 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:14,992 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-17T12:40:14,992 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05f19a912451e6e90726d599fdf98d6d columnFamilyName C 2024-12-17T12:40:14,992 DEBUG [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:14,993 INFO [StoreOpener-05f19a912451e6e90726d599fdf98d6d-1 {}] regionserver.HStore(327): Store=05f19a912451e6e90726d599fdf98d6d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-17T12:40:14,993 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:14,994 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:14,995 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:14,996 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-17T12:40:14,997 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegion(1085): writing seq id for 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:14,998 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegion(1102): Opened 05f19a912451e6e90726d599fdf98d6d; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70038930, jitterRate=0.04366138577461243}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-17T12:40:14,999 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegion(1001): Region open journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:14,999 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., pid=142, masterSystemTime=1734439214977 2024-12-17T12:40:15,000 DEBUG [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:15,000 INFO [RS_OPEN_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_OPEN_REGION, pid=142}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
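Note: context for the warnings that follow. The TableDescriptorChecker line earlier flagged a memstore flush size of 131072 bytes (128 KB), far below the 128 MB default — presumably kept tiny in this test to force frequent flushes. With hbase.hregion.memstore.block.multiplier at its default of 4, writes are rejected with RegionTooBusyException once the region holds roughly 4 × 128 KB = 512 KB of unflushed data, which matches the "Over memstore limit=512.0 K" warnings further down. The sketch below shows one way such a per-table flush size could be set; the helper withSmallFlushSize is hypothetical (the limit may equally come from the cluster config key named in the warning).

import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushSizeSketch {
  // Hypothetical helper: lower the per-region memstore flush size for a test table.
  public static TableDescriptor withSmallFlushSize(TableDescriptor base) {
    return TableDescriptorBuilder.newBuilder(base)
        .setMemStoreFlushSize(128 * 1024L)   // 131072 bytes, as reported in the warning above
        .build();
  }
}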
2024-12-17T12:40:15,001 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=140 updating hbase:meta row=05f19a912451e6e90726d599fdf98d6d, regionState=OPEN, openSeqNum=5, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,003 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=140 2024-12-17T12:40:15,003 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=140, state=SUCCESS; OpenRegionProcedure 05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 in 178 msec 2024-12-17T12:40:15,004 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=139 2024-12-17T12:40:15,004 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=139, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=05f19a912451e6e90726d599fdf98d6d, REOPEN/MOVE in 502 msec 2024-12-17T12:40:15,007 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-17T12:40:15,007 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 506 msec 2024-12-17T12:40:15,009 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 927 msec 2024-12-17T12:40:15,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-17T12:40:15,012 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e9ae050 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a703d2 2024-12-17T12:40:15,061 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17cf7fc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:40:15,063 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2fef31f8 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14ed1e44 2024-12-17T12:40:15,076 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78b04266, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:40:15,077 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0eb04aeb to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72537a47 2024-12-17T12:40:15,088 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@88aa519, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:40:15,089 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a0e9c8f 
to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36642cb 2024-12-17T12:40:15,096 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e998dd3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:40:15,097 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d68f787 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c299cfb 2024-12-17T12:40:15,105 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c79b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:40:15,105 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x605827c9 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d1403c3 2024-12-17T12:40:15,113 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328852db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:40:15,114 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3677bd4f to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3bf0ba59 2024-12-17T12:40:15,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b9e2976, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:40:15,126 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x521aad6f to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c86f707 2024-12-17T12:40:15,138 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56e9a678, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:40:15,139 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f5b2180 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34becda3 2024-12-17T12:40:15,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f7f772a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:40:15,152 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x1df61dc9 to 127.0.0.1:59557 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5fe71801 2024-12-17T12:40:15,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bf5e2f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-17T12:40:15,169 DEBUG [hconnection-0x3547f35c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:40:15,169 DEBUG [hconnection-0x79ca8a53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:40:15,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:40:15,170 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43902, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:40:15,170 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43918, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:40:15,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=143, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=143, table=TestAcidGuarantees 2024-12-17T12:40:15,171 DEBUG [hconnection-0x32b26d8a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:40:15,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-17T12:40:15,172 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=143, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=143, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:40:15,172 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43926, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:40:15,172 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=143, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=143, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:40:15,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:40:15,173 DEBUG [hconnection-0x79012215-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:40:15,173 DEBUG [hconnection-0x7f9b87b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:40:15,174 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43936, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:40:15,174 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43948, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:40:15,174 DEBUG [hconnection-0x5214d3f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:40:15,175 DEBUG [hconnection-0x663eb73a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:40:15,175 DEBUG [hconnection-0x5a68d681-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:40:15,175 DEBUG [hconnection-0x3728468b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:40:15,175 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43958, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:40:15,176 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43970, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:40:15,176 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:40:15,176 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43974, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:40:15,177 DEBUG [hconnection-0x1e386e06-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-17T12:40:15,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:15,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-17T12:40:15,179 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-17T12:40:15,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:15,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:15,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:15,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:15,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:15,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:15,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217155ce4d4418946b29238e912190ee5b6_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439215178/Put/seqid=0 2024-12-17T12:40:15,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742304_1480 (size=12154) 2024-12-17T12:40:15,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439275214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439275215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439275216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439275216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439275216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-17T12:40:15,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439275317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439275318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439275318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439275318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439275318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,324 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-17T12:40:15,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:15,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:15,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:15,325 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] handler.RSProcedureHandler(58): pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:15,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:15,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=144 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:15,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-17T12:40:15,476 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-17T12:40:15,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:15,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:15,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:15,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] handler.RSProcedureHandler(58): pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:15,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:15,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=144 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:15,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439275519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439275520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439275520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439275520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439275520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,601 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:15,604 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217155ce4d4418946b29238e912190ee5b6_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217155ce4d4418946b29238e912190ee5b6_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:15,604 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/091ed521f5a2453598fc0e14379d2476, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:15,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/091ed521f5a2453598fc0e14379d2476 is 175, key is test_row_0/A:col10/1734439215178/Put/seqid=0 2024-12-17T12:40:15,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742305_1481 (size=30955) 2024-12-17T12:40:15,628 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-17T12:40:15,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:15,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:15,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:15,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] handler.RSProcedureHandler(58): pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:15,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:15,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=144 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:15,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-17T12:40:15,781 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-17T12:40:15,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:15,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:15,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:15,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] handler.RSProcedureHandler(58): pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:15,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:15,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=144 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:15,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439275822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439275822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439275823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439275823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:15,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439275824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,932 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:15,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-17T12:40:15,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:15,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:15,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:15,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] handler.RSProcedureHandler(58): pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:15,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:15,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=144 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:16,008 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/091ed521f5a2453598fc0e14379d2476 2024-12-17T12:40:16,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/58e6e265e811460092ea980dd9a9c144 is 50, key is test_row_0/B:col10/1734439215178/Put/seqid=0 2024-12-17T12:40:16,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742306_1482 (size=12001) 2024-12-17T12:40:16,084 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:16,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-17T12:40:16,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:16,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:16,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:16,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] handler.RSProcedureHandler(58): pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:16,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:16,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=144 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:16,236 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:16,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-17T12:40:16,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:16,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:16,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:16,237 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] handler.RSProcedureHandler(58): pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:16,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:16,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=144 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:16,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-17T12:40:16,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:16,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439276324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:16,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:16,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439276325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:16,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:16,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439276327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:16,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:16,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439276327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:16,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:16,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439276329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:16,388 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:16,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-17T12:40:16,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:16,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:16,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:16,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] handler.RSProcedureHandler(58): pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:16,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=144 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:16,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=144 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:16,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/58e6e265e811460092ea980dd9a9c144 2024-12-17T12:40:16,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/22c00e7b461c4305b5992dd44790f63d is 50, key is test_row_0/C:col10/1734439215178/Put/seqid=0 2024-12-17T12:40:16,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742307_1483 (size=12001) 2024-12-17T12:40:16,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/22c00e7b461c4305b5992dd44790f63d 2024-12-17T12:40:16,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/091ed521f5a2453598fc0e14379d2476 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/091ed521f5a2453598fc0e14379d2476 2024-12-17T12:40:16,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/091ed521f5a2453598fc0e14379d2476, entries=150, sequenceid=17, filesize=30.2 K 2024-12-17T12:40:16,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/58e6e265e811460092ea980dd9a9c144 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/58e6e265e811460092ea980dd9a9c144 2024-12-17T12:40:16,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/58e6e265e811460092ea980dd9a9c144, entries=150, sequenceid=17, filesize=11.7 K 
2024-12-17T12:40:16,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/22c00e7b461c4305b5992dd44790f63d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/22c00e7b461c4305b5992dd44790f63d 2024-12-17T12:40:16,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/22c00e7b461c4305b5992dd44790f63d, entries=150, sequenceid=17, filesize=11.7 K 2024-12-17T12:40:16,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 05f19a912451e6e90726d599fdf98d6d in 1289ms, sequenceid=17, compaction requested=false 2024-12-17T12:40:16,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:16,541 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:16,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-17T12:40:16,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:16,541 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-17T12:40:16,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:16,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:16,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121713f919758c584934a55635b6f0a39ae7_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439215215/Put/seqid=0 2024-12-17T12:40:16,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742308_1484 (size=12154) 2024-12-17T12:40:16,681 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-17T12:40:16,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:16,956 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121713f919758c584934a55635b6f0a39ae7_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121713f919758c584934a55635b6f0a39ae7_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:16,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/95a3dd22d6bc433584815a43877984f7, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:16,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/95a3dd22d6bc433584815a43877984f7 is 175, key is test_row_0/A:col10/1734439215215/Put/seqid=0 2024-12-17T12:40:16,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742309_1485 (size=30955) 2024-12-17T12:40:17,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-17T12:40:17,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:17,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:17,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439277336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439277336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439277337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439277337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439277338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,360 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/95a3dd22d6bc433584815a43877984f7 2024-12-17T12:40:17,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/07e5b339b75d433dab44b65a9d330610 is 50, key is test_row_0/B:col10/1734439215215/Put/seqid=0 2024-12-17T12:40:17,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742310_1486 (size=12001) 2024-12-17T12:40:17,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439277439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439277439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439277440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439277441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439277641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439277642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439277644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439277644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,769 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/07e5b339b75d433dab44b65a9d330610 2024-12-17T12:40:17,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/2ace280874e54a64adbbae312061d575 is 50, key is test_row_0/C:col10/1734439215215/Put/seqid=0 2024-12-17T12:40:17,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742311_1487 (size=12001) 2024-12-17T12:40:17,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439277944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439277945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439277946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:17,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:17,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439277947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,177 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/2ace280874e54a64adbbae312061d575 2024-12-17T12:40:18,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/95a3dd22d6bc433584815a43877984f7 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/95a3dd22d6bc433584815a43877984f7 2024-12-17T12:40:18,182 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/95a3dd22d6bc433584815a43877984f7, entries=150, sequenceid=41, filesize=30.2 K 2024-12-17T12:40:18,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/07e5b339b75d433dab44b65a9d330610 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/07e5b339b75d433dab44b65a9d330610 2024-12-17T12:40:18,186 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/07e5b339b75d433dab44b65a9d330610, entries=150, sequenceid=41, filesize=11.7 K 2024-12-17T12:40:18,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/2ace280874e54a64adbbae312061d575 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/2ace280874e54a64adbbae312061d575 2024-12-17T12:40:18,189 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/2ace280874e54a64adbbae312061d575, entries=150, sequenceid=41, filesize=11.7 K 2024-12-17T12:40:18,190 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 05f19a912451e6e90726d599fdf98d6d in 1649ms, sequenceid=41, compaction requested=false 2024-12-17T12:40:18,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:18,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:18,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=144 2024-12-17T12:40:18,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=144 2024-12-17T12:40:18,192 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=144, resume processing ppid=143 2024-12-17T12:40:18,192 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, ppid=143, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0190 sec 2024-12-17T12:40:18,193 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=143, table=TestAcidGuarantees in 3.0220 sec 2024-12-17T12:40:18,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:18,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-17T12:40:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:18,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121714de5feb1f4d40f595a4ce76b99be506_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439218448/Put/seqid=0 2024-12-17T12:40:18,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742312_1488 (size=14594) 2024-12-17T12:40:18,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439278461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439278461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439278462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439278462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439278563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439278564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439278564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439278565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439278765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439278765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439278766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:18,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439278767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:18,858 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:18,860 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121714de5feb1f4d40f595a4ce76b99be506_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121714de5feb1f4d40f595a4ce76b99be506_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:18,861 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/5a72482db11e4b4c9343e6524606bb3b, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:18,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/5a72482db11e4b4c9343e6524606bb3b is 175, key is test_row_0/A:col10/1734439218448/Put/seqid=0 2024-12-17T12:40:18,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742313_1489 (size=39549) 2024-12-17T12:40:18,865 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/5a72482db11e4b4c9343e6524606bb3b 2024-12-17T12:40:18,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/700188936dc740df94eabc6191460f8d is 50, key is test_row_0/B:col10/1734439218448/Put/seqid=0 2024-12-17T12:40:18,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742314_1490 
(size=12001) 2024-12-17T12:40:18,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/700188936dc740df94eabc6191460f8d 2024-12-17T12:40:18,877 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/f7928543c2de4a95a65c069251b1424d is 50, key is test_row_0/C:col10/1734439218448/Put/seqid=0 2024-12-17T12:40:18,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742315_1491 (size=12001) 2024-12-17T12:40:19,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439279067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439279069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439279069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439279070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-17T12:40:19,276 INFO [Thread-2164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 143 completed 2024-12-17T12:40:19,276 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:40:19,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=145, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=145, table=TestAcidGuarantees 2024-12-17T12:40:19,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-17T12:40:19,278 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=145, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=145, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:40:19,278 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=145, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=145, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:40:19,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:40:19,301 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/f7928543c2de4a95a65c069251b1424d 2024-12-17T12:40:19,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/5a72482db11e4b4c9343e6524606bb3b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/5a72482db11e4b4c9343e6524606bb3b 2024-12-17T12:40:19,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/5a72482db11e4b4c9343e6524606bb3b, entries=200, sequenceid=55, filesize=38.6 K 2024-12-17T12:40:19,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/700188936dc740df94eabc6191460f8d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/700188936dc740df94eabc6191460f8d 2024-12-17T12:40:19,310 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/700188936dc740df94eabc6191460f8d, entries=150, sequenceid=55, filesize=11.7 K 2024-12-17T12:40:19,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/f7928543c2de4a95a65c069251b1424d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/f7928543c2de4a95a65c069251b1424d 2024-12-17T12:40:19,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/f7928543c2de4a95a65c069251b1424d, entries=150, sequenceid=55, filesize=11.7 K 2024-12-17T12:40:19,314 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 05f19a912451e6e90726d599fdf98d6d in 865ms, sequenceid=55, compaction requested=true 2024-12-17T12:40:19,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:19,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:40:19,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:19,314 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:19,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:B, priority=-2147483648, current under compaction store size 
is 2 2024-12-17T12:40:19,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:19,315 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:19,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:40:19,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:19,315 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:19,315 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:19,315 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/A is initiating minor compaction (all files) 2024-12-17T12:40:19,315 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/B is initiating minor compaction (all files) 2024-12-17T12:40:19,315 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/B in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:19,315 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/A in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:19,315 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/091ed521f5a2453598fc0e14379d2476, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/95a3dd22d6bc433584815a43877984f7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/5a72482db11e4b4c9343e6524606bb3b] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=99.1 K 2024-12-17T12:40:19,315 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/58e6e265e811460092ea980dd9a9c144, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/07e5b339b75d433dab44b65a9d330610, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/700188936dc740df94eabc6191460f8d] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=35.2 K 2024-12-17T12:40:19,315 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:19,316 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/091ed521f5a2453598fc0e14379d2476, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/95a3dd22d6bc433584815a43877984f7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/5a72482db11e4b4c9343e6524606bb3b] 2024-12-17T12:40:19,316 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 58e6e265e811460092ea980dd9a9c144, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734439215175 2024-12-17T12:40:19,316 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 07e5b339b75d433dab44b65a9d330610, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734439215215 2024-12-17T12:40:19,316 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 091ed521f5a2453598fc0e14379d2476, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734439215175 2024-12-17T12:40:19,316 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 700188936dc740df94eabc6191460f8d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439217335 2024-12-17T12:40:19,316 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95a3dd22d6bc433584815a43877984f7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734439215215 2024-12-17T12:40:19,316 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a72482db11e4b4c9343e6524606bb3b, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439217335 2024-12-17T12:40:19,320 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:19,322 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241217dfa62150c3a146be8b52813f11bf8bf3_05f19a912451e6e90726d599fdf98d6d store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:19,323 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#B#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:19,323 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241217dfa62150c3a146be8b52813f11bf8bf3_05f19a912451e6e90726d599fdf98d6d, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:19,323 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217dfa62150c3a146be8b52813f11bf8bf3_05f19a912451e6e90726d599fdf98d6d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:19,323 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/b7557f6ca4b944549f15bd7ef8762156 is 50, key is test_row_0/B:col10/1734439218448/Put/seqid=0 2024-12-17T12:40:19,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742316_1492 (size=4469) 2024-12-17T12:40:19,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742317_1493 (size=12104) 2024-12-17T12:40:19,330 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#A#compaction#413 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:19,331 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/69b772d7434540d3b7f1259689e7afc8 is 175, key is test_row_0/A:col10/1734439218448/Put/seqid=0 2024-12-17T12:40:19,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742318_1494 (size=31058) 2024-12-17T12:40:19,340 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/69b772d7434540d3b7f1259689e7afc8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/69b772d7434540d3b7f1259689e7afc8 2024-12-17T12:40:19,344 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/A of 05f19a912451e6e90726d599fdf98d6d into 69b772d7434540d3b7f1259689e7afc8(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:19,344 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:19,344 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/A, priority=13, startTime=1734439219314; duration=0sec 2024-12-17T12:40:19,344 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:19,344 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:A 2024-12-17T12:40:19,344 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:19,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:19,345 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-17T12:40:19,345 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:19,345 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/C is initiating minor compaction (all files) 2024-12-17T12:40:19,345 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/C in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:19,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:19,345 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/22c00e7b461c4305b5992dd44790f63d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/2ace280874e54a64adbbae312061d575, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/f7928543c2de4a95a65c069251b1424d] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=35.2 K 2024-12-17T12:40:19,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:19,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:19,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:19,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:19,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:19,345 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22c00e7b461c4305b5992dd44790f63d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734439215175 2024-12-17T12:40:19,346 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ace280874e54a64adbbae312061d575, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734439215215 2024-12-17T12:40:19,346 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7928543c2de4a95a65c069251b1424d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439217335 2024-12-17T12:40:19,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217f68e0ec6cd3041caa8852c8e0c88b572_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439219344/Put/seqid=0 2024-12-17T12:40:19,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742319_1495 (size=14594) 2024-12-17T12:40:19,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-17T12:40:19,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439279378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,384 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#C#compaction#416 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:19,385 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/5678a241cc7f4041bf3a73deb84a63bf is 50, key is test_row_0/C:col10/1734439218448/Put/seqid=0 2024-12-17T12:40:19,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742320_1496 (size=12104) 2024-12-17T12:40:19,394 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/5678a241cc7f4041bf3a73deb84a63bf as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/5678a241cc7f4041bf3a73deb84a63bf 2024-12-17T12:40:19,397 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/C of 05f19a912451e6e90726d599fdf98d6d into 5678a241cc7f4041bf3a73deb84a63bf(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:19,397 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:19,397 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/C, priority=13, startTime=1734439219315; duration=0sec 2024-12-17T12:40:19,397 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:19,397 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:C 2024-12-17T12:40:19,429 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:19,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:19,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:19,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:19,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:19,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:19,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:19,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439279481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439279572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439279573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439279573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439279576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-17T12:40:19,581 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:19,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:19,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:19,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:19,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:19,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:19,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:19,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439279683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,733 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/b7557f6ca4b944549f15bd7ef8762156 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/b7557f6ca4b944549f15bd7ef8762156 2024-12-17T12:40:19,733 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:19,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:19,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
as already flushing 2024-12-17T12:40:19,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:19,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:19,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:19,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:19,736 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/B of 05f19a912451e6e90726d599fdf98d6d into b7557f6ca4b944549f15bd7ef8762156(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:19,736 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:19,736 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/B, priority=13, startTime=1734439219314; duration=0sec 2024-12-17T12:40:19,736 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:19,737 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:B 2024-12-17T12:40:19,773 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:19,776 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217f68e0ec6cd3041caa8852c8e0c88b572_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f68e0ec6cd3041caa8852c8e0c88b572_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:19,777 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/57bceb25fc014b5988c82128f9ab5b5c, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:19,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/57bceb25fc014b5988c82128f9ab5b5c is 175, key is test_row_0/A:col10/1734439219344/Put/seqid=0 2024-12-17T12:40:19,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742321_1497 (size=39549) 2024-12-17T12:40:19,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-17T12:40:19,886 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:19,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:19,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:19,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:19,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:19,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:19,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:19,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:19,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:19,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439279985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,038 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:20,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:20,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:20,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,190 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,190 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:20,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:20,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:20,205 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/57bceb25fc014b5988c82128f9ab5b5c 2024-12-17T12:40:20,211 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/b5a04b7195014db8a567f05981adec7f is 50, key is test_row_0/B:col10/1734439219344/Put/seqid=0 2024-12-17T12:40:20,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742322_1498 (size=12001) 2024-12-17T12:40:20,342 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:20,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:20,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:20,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-17T12:40:20,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:20,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439280488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,493 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:20,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:20,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:20,494 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:20,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439280577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:20,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439280578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:20,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439280583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:20,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439280587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/b5a04b7195014db8a567f05981adec7f 2024-12-17T12:40:20,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/ad8f9329ee66438581582907271001aa is 50, key is test_row_0/C:col10/1734439219344/Put/seqid=0 2024-12-17T12:40:20,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742323_1499 (size=12001) 2024-12-17T12:40:20,645 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:20,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:20,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,646 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,797 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:20,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:20,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,950 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:20,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:20,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:20,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:20,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:20,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:21,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/ad8f9329ee66438581582907271001aa 2024-12-17T12:40:21,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/57bceb25fc014b5988c82128f9ab5b5c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/57bceb25fc014b5988c82128f9ab5b5c 2024-12-17T12:40:21,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/57bceb25fc014b5988c82128f9ab5b5c, entries=200, sequenceid=79, filesize=38.6 K 2024-12-17T12:40:21,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/b5a04b7195014db8a567f05981adec7f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/b5a04b7195014db8a567f05981adec7f 2024-12-17T12:40:21,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/b5a04b7195014db8a567f05981adec7f, entries=150, sequenceid=79, 
filesize=11.7 K 2024-12-17T12:40:21,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/ad8f9329ee66438581582907271001aa as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/ad8f9329ee66438581582907271001aa 2024-12-17T12:40:21,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/ad8f9329ee66438581582907271001aa, entries=150, sequenceid=79, filesize=11.7 K 2024-12-17T12:40:21,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 05f19a912451e6e90726d599fdf98d6d in 1690ms, sequenceid=79, compaction requested=false 2024-12-17T12:40:21,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:21,102 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:21,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-17T12:40:21,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:21,103 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-17T12:40:21,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:21,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:21,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:21,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:21,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:21,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:21,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412176ffdb4e105f8443197d05fa27b337112_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439219364/Put/seqid=0 2024-12-17T12:40:21,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742324_1500 (size=12154) 2024-12-17T12:40:21,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-17T12:40:21,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:21,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
as already flushing 2024-12-17T12:40:21,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:21,515 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412176ffdb4e105f8443197d05fa27b337112_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412176ffdb4e105f8443197d05fa27b337112_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:21,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/36be9b6774194945ab205d8051ddb78d, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:21,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/36be9b6774194945ab205d8051ddb78d is 175, key is test_row_0/A:col10/1734439219364/Put/seqid=0 2024-12-17T12:40:21,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742325_1501 (size=30955) 2024-12-17T12:40:21,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:21,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439281536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:21,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:21,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439281639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:21,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:21,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439281842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:21,919 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/36be9b6774194945ab205d8051ddb78d 2024-12-17T12:40:21,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/c48e789bd0ef4016871553a02f2b49f9 is 50, key is test_row_0/B:col10/1734439219364/Put/seqid=0 2024-12-17T12:40:21,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742326_1502 (size=12001) 2024-12-17T12:40:22,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439282146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:22,327 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/c48e789bd0ef4016871553a02f2b49f9 2024-12-17T12:40:22,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/c2cd6a2103e54ba49c4b8a2242ce4915 is 50, key is test_row_0/C:col10/1734439219364/Put/seqid=0 2024-12-17T12:40:22,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742327_1503 (size=12001) 2024-12-17T12:40:22,336 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/c2cd6a2103e54ba49c4b8a2242ce4915 2024-12-17T12:40:22,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/36be9b6774194945ab205d8051ddb78d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/36be9b6774194945ab205d8051ddb78d 2024-12-17T12:40:22,341 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/36be9b6774194945ab205d8051ddb78d, entries=150, sequenceid=94, filesize=30.2 K 2024-12-17T12:40:22,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/c48e789bd0ef4016871553a02f2b49f9 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/c48e789bd0ef4016871553a02f2b49f9 2024-12-17T12:40:22,344 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/c48e789bd0ef4016871553a02f2b49f9, entries=150, sequenceid=94, filesize=11.7 K 2024-12-17T12:40:22,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/c2cd6a2103e54ba49c4b8a2242ce4915 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c2cd6a2103e54ba49c4b8a2242ce4915 2024-12-17T12:40:22,347 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c2cd6a2103e54ba49c4b8a2242ce4915, entries=150, sequenceid=94, filesize=11.7 K 2024-12-17T12:40:22,347 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 05f19a912451e6e90726d599fdf98d6d in 1244ms, sequenceid=94, compaction requested=true 2024-12-17T12:40:22,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:22,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:22,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146 2024-12-17T12:40:22,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=146 2024-12-17T12:40:22,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-12-17T12:40:22,350 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0710 sec 2024-12-17T12:40:22,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=145, table=TestAcidGuarantees in 3.0790 sec 2024-12-17T12:40:22,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:22,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-17T12:40:22,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:22,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:22,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:22,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:22,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:22,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:22,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217de33a658b3f844709c052f3811d088e3_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439221536/Put/seqid=0 2024-12-17T12:40:22,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742328_1504 (size=14594) 2024-12-17T12:40:22,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439282598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:22,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439282599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:22,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439282600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:22,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439282602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:22,605 DEBUG [Thread-2160 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., hostname=681c08bfdbdf,36491,1734439058372, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:40:22,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439282648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:22,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439282704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:22,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439282704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:22,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439282704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:22,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439282907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:22,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439282908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:22,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:22,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439282908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:23,004 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:23,006 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217de33a658b3f844709c052f3811d088e3_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217de33a658b3f844709c052f3811d088e3_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:23,007 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/84999441224f44868d22efbd74e7b96e, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:23,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/84999441224f44868d22efbd74e7b96e is 175, key is test_row_0/A:col10/1734439221536/Put/seqid=0 2024-12-17T12:40:23,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742329_1505 (size=39549) 2024-12-17T12:40:23,010 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/84999441224f44868d22efbd74e7b96e 2024-12-17T12:40:23,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/aec274a4aabb4ebfa77d0d79783fed81 is 50, key is test_row_0/B:col10/1734439221536/Put/seqid=0 2024-12-17T12:40:23,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742330_1506 
(size=12001) 2024-12-17T12:40:23,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:23,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439283209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:23,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:23,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439283211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:23,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:23,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439283211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:23,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-17T12:40:23,381 INFO [Thread-2164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 145 completed 2024-12-17T12:40:23,382 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:40:23,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=147, table=TestAcidGuarantees 2024-12-17T12:40:23,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-17T12:40:23,384 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=147, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=147, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:40:23,384 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=147, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=147, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:40:23,384 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:40:23,419 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/aec274a4aabb4ebfa77d0d79783fed81 2024-12-17T12:40:23,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/c4b0281920fe4b4cba17c2f289a38c11 is 50, key is test_row_0/C:col10/1734439221536/Put/seqid=0 2024-12-17T12:40:23,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742331_1507 (size=12001) 
2024-12-17T12:40:23,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-17T12:40:23,535 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:23,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=148 2024-12-17T12:40:23,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:23,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:23,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:23,536 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] handler.RSProcedureHandler(58): pid=148 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:23,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=148 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:23,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=148 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:23,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:23,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439283650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:23,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-17T12:40:23,687 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:23,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=148 2024-12-17T12:40:23,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:23,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:23,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:23,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] handler.RSProcedureHandler(58): pid=148 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:23,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=148 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:23,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=148 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:23,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:23,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439283711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:23,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:23,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439283715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:23,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439283717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:23,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/c4b0281920fe4b4cba17c2f289a38c11 2024-12-17T12:40:23,831 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/84999441224f44868d22efbd74e7b96e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/84999441224f44868d22efbd74e7b96e 2024-12-17T12:40:23,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/84999441224f44868d22efbd74e7b96e, entries=200, sequenceid=118, filesize=38.6 K 2024-12-17T12:40:23,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/aec274a4aabb4ebfa77d0d79783fed81 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/aec274a4aabb4ebfa77d0d79783fed81 2024-12-17T12:40:23,836 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/aec274a4aabb4ebfa77d0d79783fed81, entries=150, sequenceid=118, filesize=11.7 K 2024-12-17T12:40:23,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/c4b0281920fe4b4cba17c2f289a38c11 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c4b0281920fe4b4cba17c2f289a38c11 2024-12-17T12:40:23,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c4b0281920fe4b4cba17c2f289a38c11, entries=150, sequenceid=118, filesize=11.7 K 2024-12-17T12:40:23,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 05f19a912451e6e90726d599fdf98d6d in 1252ms, sequenceid=118, compaction requested=true 2024-12-17T12:40:23,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:23,840 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:23,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:40:23,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:23,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:40:23,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:23,840 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:40:23,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:40:23,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-17T12:40:23,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=148 2024-12-17T12:40:23,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:23,840 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-17T12:40:23,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:23,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:23,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:23,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:23,841 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141111 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:40:23,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:23,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:23,841 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/A is initiating minor compaction (all files) 2024-12-17T12:40:23,841 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/A in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:23,841 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/69b772d7434540d3b7f1259689e7afc8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/57bceb25fc014b5988c82128f9ab5b5c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/36be9b6774194945ab205d8051ddb78d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/84999441224f44868d22efbd74e7b96e] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=137.8 K 2024-12-17T12:40:23,841 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:23,841 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/69b772d7434540d3b7f1259689e7afc8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/57bceb25fc014b5988c82128f9ab5b5c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/36be9b6774194945ab205d8051ddb78d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/84999441224f44868d22efbd74e7b96e] 2024-12-17T12:40:23,841 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 69b772d7434540d3b7f1259689e7afc8, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439217335 2024-12-17T12:40:23,841 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 57bceb25fc014b5988c82128f9ab5b5c, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734439218460 2024-12-17T12:40:23,841 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 36be9b6774194945ab205d8051ddb78d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734439219364 2024-12-17T12:40:23,842 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 84999441224f44868d22efbd74e7b96e, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734439221524 2024-12-17T12:40:23,844 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:40:23,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217e6dfcdd6150f4ae4be5c4c54bcf473b0_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439222591/Put/seqid=0 2024-12-17T12:40:23,846 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:40:23,846 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/B is initiating minor compaction (all files) 2024-12-17T12:40:23,846 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/B in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:23,846 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/b7557f6ca4b944549f15bd7ef8762156, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/b5a04b7195014db8a567f05981adec7f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/c48e789bd0ef4016871553a02f2b49f9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/aec274a4aabb4ebfa77d0d79783fed81] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=47.0 K 2024-12-17T12:40:23,847 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7557f6ca4b944549f15bd7ef8762156, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439217335 2024-12-17T12:40:23,847 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5a04b7195014db8a567f05981adec7f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734439218461 2024-12-17T12:40:23,847 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting c48e789bd0ef4016871553a02f2b49f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734439219364 2024-12-17T12:40:23,847 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting aec274a4aabb4ebfa77d0d79783fed81, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734439221524 2024-12-17T12:40:23,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742332_1508 (size=12204) 2024-12-17T12:40:23,849 INFO 
[RS:0;681c08bfdbdf:36491-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:23,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:23,852 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241217d861f6d37e7d4cbfb770f8dc784c1e94_05f19a912451e6e90726d599fdf98d6d store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:23,854 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217e6dfcdd6150f4ae4be5c4c54bcf473b0_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e6dfcdd6150f4ae4be5c4c54bcf473b0_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:23,854 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241217d861f6d37e7d4cbfb770f8dc784c1e94_05f19a912451e6e90726d599fdf98d6d, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:23,854 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217d861f6d37e7d4cbfb770f8dc784c1e94_05f19a912451e6e90726d599fdf98d6d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:23,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/3da0081aee2e4492beb3d1c2d162f762, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:23,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/3da0081aee2e4492beb3d1c2d162f762 is 175, key is test_row_0/A:col10/1734439222591/Put/seqid=0 2024-12-17T12:40:23,865 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#B#compaction#427 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:23,865 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/a0ed9c1d11844c9288159a1884b054c4 is 50, key is test_row_0/B:col10/1734439221536/Put/seqid=0 2024-12-17T12:40:23,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742333_1509 (size=4469) 2024-12-17T12:40:23,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742334_1510 (size=31005) 2024-12-17T12:40:23,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742335_1511 (size=12241) 2024-12-17T12:40:23,887 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/a0ed9c1d11844c9288159a1884b054c4 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/a0ed9c1d11844c9288159a1884b054c4 2024-12-17T12:40:23,891 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/B of 05f19a912451e6e90726d599fdf98d6d into a0ed9c1d11844c9288159a1884b054c4(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:23,891 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:23,891 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/B, priority=12, startTime=1734439223840; duration=0sec 2024-12-17T12:40:23,891 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:23,891 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:B 2024-12-17T12:40:23,891 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:40:23,892 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:40:23,892 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/C is initiating minor compaction (all files) 2024-12-17T12:40:23,892 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/C in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:23,892 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/5678a241cc7f4041bf3a73deb84a63bf, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/ad8f9329ee66438581582907271001aa, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c2cd6a2103e54ba49c4b8a2242ce4915, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c4b0281920fe4b4cba17c2f289a38c11] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=47.0 K 2024-12-17T12:40:23,892 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5678a241cc7f4041bf3a73deb84a63bf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734439217335 2024-12-17T12:40:23,892 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad8f9329ee66438581582907271001aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734439218461 2024-12-17T12:40:23,893 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2cd6a2103e54ba49c4b8a2242ce4915, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734439219364 2024-12-17T12:40:23,893 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4b0281920fe4b4cba17c2f289a38c11, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734439221524 2024-12-17T12:40:23,899 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#C#compaction#428 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:23,900 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/60ae712495274839a370ee9ce50415c1 is 50, key is test_row_0/C:col10/1734439221536/Put/seqid=0 2024-12-17T12:40:23,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742336_1512 (size=12241) 2024-12-17T12:40:23,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-17T12:40:24,283 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/3da0081aee2e4492beb3d1c2d162f762 2024-12-17T12:40:24,283 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#A#compaction#426 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:24,284 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/1d7ac7c71d884b66a9f00846bf5c4841 is 175, key is test_row_0/A:col10/1734439221536/Put/seqid=0 2024-12-17T12:40:24,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742337_1513 (size=31195) 2024-12-17T12:40:24,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/592ff35fc3fc4a2fbbff77e38ffb7732 is 50, key is test_row_0/B:col10/1734439222591/Put/seqid=0 2024-12-17T12:40:24,308 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/1d7ac7c71d884b66a9f00846bf5c4841 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/1d7ac7c71d884b66a9f00846bf5c4841 2024-12-17T12:40:24,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742338_1514 (size=12051) 2024-12-17T12:40:24,312 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/592ff35fc3fc4a2fbbff77e38ffb7732 2024-12-17T12:40:24,313 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/60ae712495274839a370ee9ce50415c1 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/60ae712495274839a370ee9ce50415c1 2024-12-17T12:40:24,314 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/A of 05f19a912451e6e90726d599fdf98d6d into 1d7ac7c71d884b66a9f00846bf5c4841(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:24,314 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:24,314 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/A, priority=12, startTime=1734439223839; duration=0sec 2024-12-17T12:40:24,314 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:24,314 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:A 2024-12-17T12:40:24,317 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/C of 05f19a912451e6e90726d599fdf98d6d into 60ae712495274839a370ee9ce50415c1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:24,317 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:24,317 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/C, priority=12, startTime=1734439223840; duration=0sec 2024-12-17T12:40:24,317 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:24,317 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:C 2024-12-17T12:40:24,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/feff22f1ff6a4ba2b666b28f71546d73 is 50, key is test_row_0/C:col10/1734439222591/Put/seqid=0 2024-12-17T12:40:24,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742339_1515 (size=12051) 2024-12-17T12:40:24,328 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/feff22f1ff6a4ba2b666b28f71546d73 2024-12-17T12:40:24,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/3da0081aee2e4492beb3d1c2d162f762 as 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/3da0081aee2e4492beb3d1c2d162f762 2024-12-17T12:40:24,334 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/3da0081aee2e4492beb3d1c2d162f762, entries=150, sequenceid=130, filesize=30.3 K 2024-12-17T12:40:24,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/592ff35fc3fc4a2fbbff77e38ffb7732 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/592ff35fc3fc4a2fbbff77e38ffb7732 2024-12-17T12:40:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,337 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/592ff35fc3fc4a2fbbff77e38ffb7732, entries=150, sequenceid=130, filesize=11.8 K 2024-12-17T12:40:24,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/feff22f1ff6a4ba2b666b28f71546d73 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/feff22f1ff6a4ba2b666b28f71546d73 2024-12-17T12:40:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,340 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/feff22f1ff6a4ba2b666b28f71546d73, entries=150, sequenceid=130, filesize=11.8 K 2024-12-17T12:40:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,341 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for 05f19a912451e6e90726d599fdf98d6d in 501ms, sequenceid=130, compaction requested=false 2024-12-17T12:40:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:24,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:24,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=148 2024-12-17T12:40:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=148 2024-12-17T12:40:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,343 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-12-17T12:40:24,343 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 958 msec 2024-12-17T12:40:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,343 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,344 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=147, table=TestAcidGuarantees in 961 msec 2024-12-17T12:40:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,345 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,347 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,348 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,350 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,351 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,353 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,355 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,356 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,358 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,360 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,362 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,398 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,400 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,401 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,403 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,405 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,406 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,408 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,410 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,411 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,413 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,415 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,417 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,418 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,420 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same StoreFileTrackerFactory(122) DEBUG entry repeats continuously from 2024-12-17T12:40:24,420 through 2024-12-17T12:40:24,459 across RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=36491) ...]
2024-12-17T12:40:24,460 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,461 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,463 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,465 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,467 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,470 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,471 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,474 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,476 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,478 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,479 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,481 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,482 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,484 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,485 DEBUG 
[... further duplicate DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) instantiating DefaultStoreFileTracker, emitted by handlers 0-2 on port 36491 between 2024-12-17T12:40:24,485 and 2024-12-17T12:40:24,491, omitted; the remaining distinct entries follow ...]
2024-12-17T12:40:24,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147
2024-12-17T12:40:24,486 INFO [Thread-2164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 147 completed
2024-12-17T12:40:24,487 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-17T12:40:24,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees
2024-12-17T12:40:24,488 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-17T12:40:24,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149
2024-12-17T12:40:24,489 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-17T12:40:24,489 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-17T12:40:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the identical DEBUG message from storefiletracker.StoreFileTrackerFactory(122) ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") repeats continuously from 2024-12-17T12:40:24,515 through 2024-12-17T12:40:24,558, emitted in round-robin by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 36491 ...]
2024-12-17T12:40:24,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-17T12:40:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,640 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:24,640 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-17T12:40:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:24,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:24,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=150 2024-12-17T12:40:24,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=150 2024-12-17T12:40:24,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,643 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 
2024-12-17T12:40:24,643 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 153 msec 2024-12-17T12:40:24,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,644 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees in 156 msec 2024-12-17T12:40:24,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:24,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-17T12:40:24,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:24,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:24,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:24,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:24,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:24,750 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:24,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217f01fd69f061641cea1df7062dd44e2c5_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439224742/Put/seqid=0 2024-12-17T12:40:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,759 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:24,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439284780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:24,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:24,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439284781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:24,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:24,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439284782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:24,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742341_1517 (size=24758) 2024-12-17T12:40:24,787 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:24,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-17T12:40:24,790 INFO [Thread-2164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 149 completed 2024-12-17T12:40:24,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:40:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees 2024-12-17T12:40:24,792 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217f01fd69f061641cea1df7062dd44e2c5_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f01fd69f061641cea1df7062dd44e2c5_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:24,793 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:40:24,794 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/715b2c17c24446eb9803820f8d79d242, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:24,795 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:40:24,795 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:40:24,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-17T12:40:24,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/715b2c17c24446eb9803820f8d79d242 is 175, key is test_row_0/A:col10/1734439224742/Put/seqid=0 2024-12-17T12:40:24,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742340_1516 (size=74394) 2024-12-17T12:40:24,802 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=145, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/715b2c17c24446eb9803820f8d79d242 2024-12-17T12:40:24,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/31eb1e202ac34306bf2db4991d42b89e is 50, key is test_row_0/B:col10/1734439224742/Put/seqid=0 2024-12-17T12:40:24,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742342_1518 (size=12151) 2024-12-17T12:40:24,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=145 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/31eb1e202ac34306bf2db4991d42b89e 2024-12-17T12:40:24,836 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/09a58652ceb3473b8c115bcd6a48dc16 is 50, key is test_row_0/C:col10/1734439224742/Put/seqid=0 2024-12-17T12:40:24,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742343_1519 (size=12151) 2024-12-17T12:40:24,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:24,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439284883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:24,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:24,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439284883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:24,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:24,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439284884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:24,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-17T12:40:24,946 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:24,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:24,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:24,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:24,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:24,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:24,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:24,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439285085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439285085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439285085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-17T12:40:25,099 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:25,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:25,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:25,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=145 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/09a58652ceb3473b8c115bcd6a48dc16 2024-12-17T12:40:25,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/715b2c17c24446eb9803820f8d79d242 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/715b2c17c24446eb9803820f8d79d242 2024-12-17T12:40:25,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/715b2c17c24446eb9803820f8d79d242, entries=400, sequenceid=145, filesize=72.7 K 2024-12-17T12:40:25,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/31eb1e202ac34306bf2db4991d42b89e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/31eb1e202ac34306bf2db4991d42b89e 2024-12-17T12:40:25,251 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:25,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,251 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/31eb1e202ac34306bf2db4991d42b89e, entries=150, sequenceid=145, filesize=11.9 K 2024-12-17T12:40:25,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:25,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:25,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/09a58652ceb3473b8c115bcd6a48dc16 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/09a58652ceb3473b8c115bcd6a48dc16 2024-12-17T12:40:25,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:25,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/09a58652ceb3473b8c115bcd6a48dc16, entries=150, sequenceid=145, filesize=11.9 K 2024-12-17T12:40:25,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 05f19a912451e6e90726d599fdf98d6d in 505ms, sequenceid=145, compaction requested=true 2024-12-17T12:40:25,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:25,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:40:25,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:25,256 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:25,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:40:25,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:25,256 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:25,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:40:25,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:25,256 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:25,256 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136594 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:25,256 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/B is initiating minor compaction (all files) 2024-12-17T12:40:25,256 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/A is initiating minor compaction (all files) 2024-12-17T12:40:25,257 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/A in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:25,257 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/B in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,257 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/a0ed9c1d11844c9288159a1884b054c4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/592ff35fc3fc4a2fbbff77e38ffb7732, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/31eb1e202ac34306bf2db4991d42b89e] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=35.6 K 2024-12-17T12:40:25,257 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/1d7ac7c71d884b66a9f00846bf5c4841, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/3da0081aee2e4492beb3d1c2d162f762, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/715b2c17c24446eb9803820f8d79d242] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=133.4 K 2024-12-17T12:40:25,257 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,257 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/1d7ac7c71d884b66a9f00846bf5c4841, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/3da0081aee2e4492beb3d1c2d162f762, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/715b2c17c24446eb9803820f8d79d242] 2024-12-17T12:40:25,259 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting a0ed9c1d11844c9288159a1884b054c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734439221524 2024-12-17T12:40:25,259 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d7ac7c71d884b66a9f00846bf5c4841, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734439221524 2024-12-17T12:40:25,259 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 592ff35fc3fc4a2fbbff77e38ffb7732, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1734439222591 2024-12-17T12:40:25,259 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3da0081aee2e4492beb3d1c2d162f762, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1734439222591 2024-12-17T12:40:25,263 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 31eb1e202ac34306bf2db4991d42b89e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734439224742 2024-12-17T12:40:25,263 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 715b2c17c24446eb9803820f8d79d242, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734439224735 2024-12-17T12:40:25,270 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#B#compaction#434 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:25,270 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/3fe0129e16ae467094af857240e86336 is 50, key is test_row_0/B:col10/1734439224742/Put/seqid=0 2024-12-17T12:40:25,271 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:25,292 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412174182f50c4e344391a3c0d083cecaefba_05f19a912451e6e90726d599fdf98d6d store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:25,294 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412174182f50c4e344391a3c0d083cecaefba_05f19a912451e6e90726d599fdf98d6d, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:25,294 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412174182f50c4e344391a3c0d083cecaefba_05f19a912451e6e90726d599fdf98d6d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:25,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742344_1520 (size=12493) 2024-12-17T12:40:25,305 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/3fe0129e16ae467094af857240e86336 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/3fe0129e16ae467094af857240e86336 2024-12-17T12:40:25,310 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/B of 05f19a912451e6e90726d599fdf98d6d into 3fe0129e16ae467094af857240e86336(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:25,310 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:25,310 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/B, priority=13, startTime=1734439225256; duration=0sec 2024-12-17T12:40:25,310 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:25,310 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:B 2024-12-17T12:40:25,310 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:25,312 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:25,312 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/C is initiating minor compaction (all files) 2024-12-17T12:40:25,312 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/C in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,312 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/60ae712495274839a370ee9ce50415c1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/feff22f1ff6a4ba2b666b28f71546d73, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/09a58652ceb3473b8c115bcd6a48dc16] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=35.6 K 2024-12-17T12:40:25,313 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 60ae712495274839a370ee9ce50415c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734439221524 2024-12-17T12:40:25,313 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting feff22f1ff6a4ba2b666b28f71546d73, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1734439222591 2024-12-17T12:40:25,313 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 09a58652ceb3473b8c115bcd6a48dc16, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734439224742 2024-12-17T12:40:25,322 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
05f19a912451e6e90726d599fdf98d6d#C#compaction#436 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:25,322 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/61ffd7649f454989ab0df69fdabacf60 is 50, key is test_row_0/C:col10/1734439224742/Put/seqid=0 2024-12-17T12:40:25,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742345_1521 (size=4469) 2024-12-17T12:40:25,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742346_1522 (size=12493) 2024-12-17T12:40:25,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:25,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-17T12:40:25,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:25,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:25,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:25,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:25,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:25,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-17T12:40:25,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439285397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217f71fbd78f955481f897328c794db82ec_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439224778/Put/seqid=0 2024-12-17T12:40:25,403 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:25,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:25,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439285400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:25,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439285404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742347_1523 (size=14794) 2024-12-17T12:40:25,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439285501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439285505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439285506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,556 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,556 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:25,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:25,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:25,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439285666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,668 DEBUG [Thread-2158 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., hostname=681c08bfdbdf,36491,1734439058372, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:40:25,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439285705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,709 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439285708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,709 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:25,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:25,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,709 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
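On the client side, the RpcRetryingCallerImpl entry above (tries=6, retries=16) shows HTable.put retrying internally while the region keeps answering RegionTooBusyException. Below is a hypothetical client sketch of the same call path; the table, row, and column names mirror the test data in the log, while the retry settings are illustrative assumptions rather than the test's configuration.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative retry bounds; the log shows retries=16 with a growing backoff.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L); // base pause in ms between retries

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // put() retries RegionTooBusyException internally, as logged above,
        // and only throws once the retry budget is exhausted.
        table.put(put);
      } catch (IOException retriesExhausted) {
        // Typically a retries-exhausted exception wrapping the last RegionTooBusyException.
        System.err.println("Region stayed over its memstore limit: " + retriesExhausted);
      }
    }
  }
}
```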
2024-12-17T12:40:25,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439285708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
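The pid=152 cycle above (Executing remote procedure, "NOT flushing ... as already flushing", "Unable to complete flush", "Remote procedure failed") is the master repeatedly re-dispatching a flush procedure while the region server is still working on the previous flush. The sketch below is purely illustrative of that shape, not the actual FlushRegionCallable source; the RegionHandle interface is invented for this example.

```java
import java.io.IOException;
import java.util.concurrent.Callable;

public class FlushRegionSketch implements Callable<Void> {
  private final RegionHandle region; // hypothetical stand-in for HRegion

  public FlushRegionSketch(RegionHandle region) {
    this.region = region;
  }

  @Override
  public Void call() throws IOException {
    // Mirrors the log: "NOT flushing ... as already flushing" followed by
    // "Unable to complete flush", which the master treats as a retryable failure
    // and re-dispatches until the region can actually flush.
    if (!region.requestFlush()) {
      throw new IOException("Unable to complete flush " + region.name());
    }
    return null;
  }

  /** Hypothetical minimal region handle used only for this sketch. */
  public interface RegionHandle {
    boolean requestFlush(); // returns false when a flush is already in progress
    String name();
  }
}
```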
2024-12-17T12:40:25,725 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#A#compaction#435 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:25,726 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/9db7dbf316f34f49ad74f3bc9037b54f is 175, key is test_row_0/A:col10/1734439224742/Put/seqid=0 2024-12-17T12:40:25,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742348_1524 (size=31447) 2024-12-17T12:40:25,736 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/61ffd7649f454989ab0df69fdabacf60 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/61ffd7649f454989ab0df69fdabacf60 2024-12-17T12:40:25,740 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/C of 05f19a912451e6e90726d599fdf98d6d into 61ffd7649f454989ab0df69fdabacf60(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:25,740 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:25,740 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/C, priority=13, startTime=1734439225256; duration=0sec 2024-12-17T12:40:25,741 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:25,741 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:C 2024-12-17T12:40:25,825 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:25,828 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217f71fbd78f955481f897328c794db82ec_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f71fbd78f955481f897328c794db82ec_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:25,828 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/8c40f2a813844d56ab6ae8c94c095114, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:25,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/8c40f2a813844d56ab6ae8c94c095114 is 175, key is test_row_0/A:col10/1734439224778/Put/seqid=0 2024-12-17T12:40:25,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742349_1525 (size=39749) 2024-12-17T12:40:25,861 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:25,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:25,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:25,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:25,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
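Separately from the put/flush contention, the compaction entries above show the PressureAwareThroughputController pacing store compactions (here well under the reported 50.00 MB/second total limit). A minimal sketch of the throttle bounds follows; the key names are assumed from the standard HBase compaction-throughput settings and the values are illustrative, not the ones this test run actually used.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThrottleSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Illustrative bounds (bytes/sec); the controller scales the allowed compaction
    // throughput between them based on flush pressure, which is why the log reports
    // a current "total limit" for the running compactions.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);

    System.out.println("Compaction throughput bounded between "
        + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 0) + " and "
        + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0) + " bytes/sec");
  }
}
```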
2024-12-17T12:40:25,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:25,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-17T12:40:26,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:26,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439286007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439286011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,013 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,013 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:26,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:26,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,013 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:26,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439286012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:26,132 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/9db7dbf316f34f49ad74f3bc9037b54f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/9db7dbf316f34f49ad74f3bc9037b54f 2024-12-17T12:40:26,136 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/A of 05f19a912451e6e90726d599fdf98d6d into 9db7dbf316f34f49ad74f3bc9037b54f(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:26,136 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:26,136 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/A, priority=13, startTime=1734439225256; duration=0sec 2024-12-17T12:40:26,136 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:26,136 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:A 2024-12-17T12:40:26,167 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:26,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:26,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,167 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,232 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/8c40f2a813844d56ab6ae8c94c095114 2024-12-17T12:40:26,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/2073930ddf7c4e4ba03ee402d081ad11 is 50, key is test_row_0/B:col10/1734439224778/Put/seqid=0 2024-12-17T12:40:26,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742350_1526 (size=12151) 2024-12-17T12:40:26,319 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:26,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
as already flushing 2024-12-17T12:40:26,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,471 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:26,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:26,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:26,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439286512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:26,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439286513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:26,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439286517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,623 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:26,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:26,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:26,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:26,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43902 deadline: 1734439286624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,625 DEBUG [Thread-2160 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., hostname=681c08bfdbdf,36491,1734439058372, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:40:26,657 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/2073930ddf7c4e4ba03ee402d081ad11 2024-12-17T12:40:26,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/7e38f0bc702041349d8053c53c3002be is 50, key is test_row_0/C:col10/1734439224778/Put/seqid=0 2024-12-17T12:40:26,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742351_1527 (size=12151) 2024-12-17T12:40:26,775 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:26,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:26,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-17T12:40:26,927 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:26,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:26,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:26,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:26,928 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:26,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:27,068 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/7e38f0bc702041349d8053c53c3002be 2024-12-17T12:40:27,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/8c40f2a813844d56ab6ae8c94c095114 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/8c40f2a813844d56ab6ae8c94c095114 2024-12-17T12:40:27,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/8c40f2a813844d56ab6ae8c94c095114, entries=200, sequenceid=174, filesize=38.8 K 2024-12-17T12:40:27,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/2073930ddf7c4e4ba03ee402d081ad11 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/2073930ddf7c4e4ba03ee402d081ad11 2024-12-17T12:40:27,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/2073930ddf7c4e4ba03ee402d081ad11, entries=150, sequenceid=174, filesize=11.9 K 2024-12-17T12:40:27,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/7e38f0bc702041349d8053c53c3002be as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e38f0bc702041349d8053c53c3002be 2024-12-17T12:40:27,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e38f0bc702041349d8053c53c3002be, entries=150, sequenceid=174, filesize=11.9 K 2024-12-17T12:40:27,079 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:27,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:27,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:27,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:27,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:27,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:27,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:27,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:27,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 05f19a912451e6e90726d599fdf98d6d in 1690ms, sequenceid=174, compaction requested=false 2024-12-17T12:40:27,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:27,231 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:27,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-17T12:40:27,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:27,232 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-17T12:40:27,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:27,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:27,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:27,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:27,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:27,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:27,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412172892a7cb8c85451bad1e951a5dbeec74_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439225395/Put/seqid=0 2024-12-17T12:40:27,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742352_1528 (size=12304) 2024-12-17T12:40:27,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
as already flushing 2024-12-17T12:40:27,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:27,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:27,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:27,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439287535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:27,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439287536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:27,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:27,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439287537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:27,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:27,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439287638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:27,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:27,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439287638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:27,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:27,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439287639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:27,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:27,643 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412172892a7cb8c85451bad1e951a5dbeec74_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412172892a7cb8c85451bad1e951a5dbeec74_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:27,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/cb1c4a415d37477db45bd01a25037628, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:27,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/cb1c4a415d37477db45bd01a25037628 is 175, key is test_row_0/A:col10/1734439225395/Put/seqid=0 2024-12-17T12:40:27,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742353_1529 (size=31105) 2024-12-17T12:40:27,657 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=184, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/cb1c4a415d37477db45bd01a25037628 2024-12-17T12:40:27,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/cf770b61021b41fa973b43b9928c1d3b is 50, key is test_row_0/B:col10/1734439225395/Put/seqid=0 2024-12-17T12:40:27,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742354_1530 (size=12151) 2024-12-17T12:40:27,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:27,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439287841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:27,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:27,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439287841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:27,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:27,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439287841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,064 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/cf770b61021b41fa973b43b9928c1d3b 2024-12-17T12:40:28,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/d217ebc629ce4e8db8c37fa641829ee8 is 50, key is test_row_0/C:col10/1734439225395/Put/seqid=0 2024-12-17T12:40:28,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742355_1531 (size=12151) 2024-12-17T12:40:28,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439288143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439288144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439288145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,472 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/d217ebc629ce4e8db8c37fa641829ee8 2024-12-17T12:40:28,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/cb1c4a415d37477db45bd01a25037628 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/cb1c4a415d37477db45bd01a25037628 2024-12-17T12:40:28,478 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/cb1c4a415d37477db45bd01a25037628, entries=150, sequenceid=184, filesize=30.4 K 2024-12-17T12:40:28,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/cf770b61021b41fa973b43b9928c1d3b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/cf770b61021b41fa973b43b9928c1d3b 2024-12-17T12:40:28,481 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/cf770b61021b41fa973b43b9928c1d3b, entries=150, sequenceid=184, filesize=11.9 K 2024-12-17T12:40:28,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 
{event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/d217ebc629ce4e8db8c37fa641829ee8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/d217ebc629ce4e8db8c37fa641829ee8 2024-12-17T12:40:28,484 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/d217ebc629ce4e8db8c37fa641829ee8, entries=150, sequenceid=184, filesize=11.9 K 2024-12-17T12:40:28,485 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 05f19a912451e6e90726d599fdf98d6d in 1253ms, sequenceid=184, compaction requested=true 2024-12-17T12:40:28,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:28,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:28,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=152 2024-12-17T12:40:28,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=152 2024-12-17T12:40:28,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-12-17T12:40:28,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6910 sec 2024-12-17T12:40:28,488 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees in 3.6960 sec 2024-12-17T12:40:28,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:28,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-17T12:40:28,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:28,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:28,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:28,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:28,648 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:28,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:28,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121737e9f1a713264c05a3f19c16089ecdb3_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439228646/Put/seqid=0 2024-12-17T12:40:28,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439288653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742356_1532 (size=12304) 2024-12-17T12:40:28,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439288653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439288653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439288756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439288756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439288756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-17T12:40:28,900 INFO [Thread-2164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 151 completed 2024-12-17T12:40:28,901 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:40:28,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-12-17T12:40:28,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-17T12:40:28,902 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:40:28,902 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:40:28,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:40:28,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439288958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439288958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:28,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:28,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439288958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-17T12:40:29,053 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-17T12:40:29,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:29,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,054 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,056 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:29,058 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121737e9f1a713264c05a3f19c16089ecdb3_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121737e9f1a713264c05a3f19c16089ecdb3_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:29,067 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/c61460c892c2461c81632c07c1bc3b8d, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:29,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/c61460c892c2461c81632c07c1bc3b8d is 175, key is test_row_0/A:col10/1734439228646/Put/seqid=0 2024-12-17T12:40:29,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742357_1533 (size=31105) 2024-12-17T12:40:29,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-17T12:40:29,205 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-17T12:40:29,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:29,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,206 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:29,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:29,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439289260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:29,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439289261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:29,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439289262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,358 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-17T12:40:29,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:29,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:29,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,471 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/c61460c892c2461c81632c07c1bc3b8d 2024-12-17T12:40:29,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/eecac2f5dee848ebbbf2ebe7808e6931 is 50, key is test_row_0/B:col10/1734439228646/Put/seqid=0 2024-12-17T12:40:29,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742358_1534 (size=12151) 2024-12-17T12:40:29,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-17T12:40:29,510 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-17T12:40:29,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:29,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,662 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-17T12:40:29,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:29,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,663 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:29,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43990 deadline: 1734439289705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,708 DEBUG [Thread-2158 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8172 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., hostname=681c08bfdbdf,36491,1734439058372, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-17T12:40:29,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:29,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439289763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:29,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439289765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:29,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439289767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,815 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-17T12:40:29,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:29,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:29,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,815 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:29,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:29,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/eecac2f5dee848ebbbf2ebe7808e6931 2024-12-17T12:40:29,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/e2acedf7a49b44319a5b5005b1ed8101 is 50, key is test_row_0/C:col10/1734439228646/Put/seqid=0 2024-12-17T12:40:29,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742359_1535 (size=12151) 2024-12-17T12:40:29,967 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:29,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-17T12:40:29,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:29,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:29,967 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:29,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:29,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:30,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-17T12:40:30,119 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:30,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-17T12:40:30,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:30,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:30,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:30,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:30,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:30,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:30,271 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:30,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-17T12:40:30,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:30,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:30,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:30,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:30,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:30,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
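The three identical pid=154 failures above (at 12:40:29,967, 12:40:30,119 and 12:40:30,271) are the master re-dispatching the remote FlushRegionCallable while the region's own MemStoreFlusher flush is still running: HRegion answers "NOT flushing ... as already flushing", the callable fails with IOException, and the master retries until the in-flight flush finishes (which it does at 12:40:30,315 below). A minimal sketch of that guard pattern follows; it is illustrative only and not the actual HRegion/FlushRegionCallable code.

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    class FlushGuardSketch {
      private final AtomicBoolean flushing = new AtomicBoolean(false);

      // Called for each remote flush request; rejected while a flush is in progress,
      // which is what makes the master retry the procedure, as seen in the log.
      void requestFlush() throws IOException {
        if (!flushing.compareAndSet(false, true)) {
          throw new IOException("Unable to complete flush: already flushing");
        }
        try {
          doFlush();
        } finally {
          flushing.set(false);
        }
      }

      private void doFlush() {
        // snapshot the memstore and write it out as HFiles (omitted)
      }
    }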
2024-12-17T12:40:30,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/e2acedf7a49b44319a5b5005b1ed8101 2024-12-17T12:40:30,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/c61460c892c2461c81632c07c1bc3b8d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c61460c892c2461c81632c07c1bc3b8d 2024-12-17T12:40:30,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c61460c892c2461c81632c07c1bc3b8d, entries=150, sequenceid=213, filesize=30.4 K 2024-12-17T12:40:30,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/eecac2f5dee848ebbbf2ebe7808e6931 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/eecac2f5dee848ebbbf2ebe7808e6931 2024-12-17T12:40:30,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/eecac2f5dee848ebbbf2ebe7808e6931, entries=150, sequenceid=213, filesize=11.9 K 2024-12-17T12:40:30,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/e2acedf7a49b44319a5b5005b1ed8101 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/e2acedf7a49b44319a5b5005b1ed8101 2024-12-17T12:40:30,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/e2acedf7a49b44319a5b5005b1ed8101, entries=150, sequenceid=213, filesize=11.9 K 2024-12-17T12:40:30,315 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 05f19a912451e6e90726d599fdf98d6d in 1668ms, sequenceid=213, compaction requested=true 2024-12-17T12:40:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:30,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:40:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:30,316 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:40:30,316 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:40:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:40:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:40:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:30,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,318 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133406 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:40:30,318 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:40:30,318 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/A is initiating minor compaction (all files) 2024-12-17T12:40:30,318 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/B is initiating minor compaction (all files) 2024-12-17T12:40:30,318 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/B in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:30,318 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/A in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:30,318 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/9db7dbf316f34f49ad74f3bc9037b54f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/8c40f2a813844d56ab6ae8c94c095114, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/cb1c4a415d37477db45bd01a25037628, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c61460c892c2461c81632c07c1bc3b8d] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=130.3 K 2024-12-17T12:40:30,318 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/3fe0129e16ae467094af857240e86336, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/2073930ddf7c4e4ba03ee402d081ad11, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/cf770b61021b41fa973b43b9928c1d3b, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/eecac2f5dee848ebbbf2ebe7808e6931] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=47.8 K 2024-12-17T12:40:30,318 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:30,318 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/9db7dbf316f34f49ad74f3bc9037b54f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/8c40f2a813844d56ab6ae8c94c095114, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/cb1c4a415d37477db45bd01a25037628, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c61460c892c2461c81632c07c1bc3b8d] 2024-12-17T12:40:30,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,319 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fe0129e16ae467094af857240e86336, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734439224742 2024-12-17T12:40:30,319 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9db7dbf316f34f49ad74f3bc9037b54f, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734439224742 2024-12-17T12:40:30,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,319 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 2073930ddf7c4e4ba03ee402d081ad11, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1734439224778 2024-12-17T12:40:30,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,319 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c40f2a813844d56ab6ae8c94c095114, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1734439224778 
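A quick sanity check on the sizes in these compaction lines: the selector reports 4 A files totalling 133406 bytes and 4 B files totalling 48946 bytes, which the compaction log prints in binary units as totalSize=130.3 K and 47.8 K; likewise the flush above reported ~174.43 KB/178620. The conversions can be verified with a few lines (illustrative only):

    public class SizeCheck {
      public static void main(String[] args) {
        long[] bytes = {133406L, 48946L, 178620L};
        for (long b : bytes) {
          // 133406 -> 130.28 KiB, 48946 -> 47.80 KiB, 178620 -> 174.43 KiB
          System.out.printf("%d bytes = %.2f KiB%n", b, b / 1024.0);
        }
      }
    }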
2024-12-17T12:40:30,319 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting cf770b61021b41fa973b43b9928c1d3b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1734439225395 2024-12-17T12:40:30,319 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb1c4a415d37477db45bd01a25037628, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1734439225395 2024-12-17T12:40:30,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,320 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting eecac2f5dee848ebbbf2ebe7808e6931, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734439227534 2024-12-17T12:40:30,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,320 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting c61460c892c2461c81632c07c1bc3b8d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734439227534 2024-12-17T12:40:30,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,327 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#B#compaction#446 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:30,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,328 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/feba350231e747018ecaf68a046821b0 is 50, key is test_row_0/B:col10/1734439228646/Put/seqid=0 2024-12-17T12:40:30,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,330 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,331 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:30,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,336 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121734e2b2d702da416ebaa888e6b8b6dd48_05f19a912451e6e90726d599fdf98d6d store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,338 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121734e2b2d702da416ebaa888e6b8b6dd48_05f19a912451e6e90726d599fdf98d6d, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,338 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121734e2b2d702da416ebaa888e6b8b6dd48_05f19a912451e6e90726d599fdf98d6d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,339 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742360_1536 (size=12629) 2024-12-17T12:40:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742361_1537 (size=4469) 2024-12-17T12:40:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,424 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-17T12:40:30,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,424 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-17T12:40:30,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:30,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:30,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:30,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:30,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:30,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,425 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,428 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412170de20ed96097471f9dac55d17511ec92_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439228651/Put/seqid=0 2024-12-17T12:40:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742362_1538 (size=9814) 2024-12-17T12:40:30,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:40:30,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
...
2024-12-17T12:40:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:40:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[... identical DEBUG message "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeated continuously by RpcServer.default.FPBQ.Fifo handlers 1 and 2 (queue=0, port=36491) between 2024-12-17T12:40:30,669 and 2024-12-17T12:40:30,734 ...]
2024-12-17T12:40:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,744 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/feba350231e747018ecaf68a046821b0 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/feba350231e747018ecaf68a046821b0 2024-12-17T12:40:30,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,748 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/B of 05f19a912451e6e90726d599fdf98d6d into feba350231e747018ecaf68a046821b0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:30,748 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:30,748 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/B, priority=12, startTime=1734439230316; duration=0sec 2024-12-17T12:40:30,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,748 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:30,748 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:B 2024-12-17T12:40:30,748 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-17T12:40:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,750 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-17T12:40:30,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,750 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/C is initiating minor compaction (all files) 2024-12-17T12:40:30,750 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,750 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/C in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:30,750 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/61ffd7649f454989ab0df69fdabacf60, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e38f0bc702041349d8053c53c3002be, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/d217ebc629ce4e8db8c37fa641829ee8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/e2acedf7a49b44319a5b5005b1ed8101] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=47.8 K 2024-12-17T12:40:30,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,750 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 61ffd7649f454989ab0df69fdabacf60, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734439224742 2024-12-17T12:40:30,751 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e38f0bc702041349d8053c53c3002be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1734439224778 2024-12-17T12:40:30,751 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d217ebc629ce4e8db8c37fa641829ee8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1734439225395 2024-12-17T12:40:30,751 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting e2acedf7a49b44319a5b5005b1ed8101, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734439227534 2024-12-17T12:40:30,753 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#A#compaction#447 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:30,753 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/c962bec30aab4ef3b3364c209b1df0a8 is 175, key is test_row_0/A:col10/1734439228646/Put/seqid=0 2024-12-17T12:40:30,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,760 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#C#compaction#449 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:30,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,761 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/cc71621da8dd430895e7f72e8b06206d is 50, key is test_row_0/C:col10/1734439228646/Put/seqid=0 2024-12-17T12:40:30,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,764 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,767 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,771 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742363_1539 (size=31583) 2024-12-17T12:40:30,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,776 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/c962bec30aab4ef3b3364c209b1df0a8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c962bec30aab4ef3b3364c209b1df0a8 2024-12-17T12:40:30,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,780 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/A of 05f19a912451e6e90726d599fdf98d6d into c962bec30aab4ef3b3364c209b1df0a8(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:30,780 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:30,780 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/A, priority=12, startTime=1734439230316; duration=0sec 2024-12-17T12:40:30,780 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:30,780 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:A 2024-12-17T12:40:30,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
as already flushing 2024-12-17T12:40:30,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:30,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:30,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742364_1540 (size=12629) 2024-12-17T12:40:30,806 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/cc71621da8dd430895e7f72e8b06206d as 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/cc71621da8dd430895e7f72e8b06206d
2024-12-17T12:40:30,811 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/C of 05f19a912451e6e90726d599fdf98d6d into cc71621da8dd430895e7f72e8b06206d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-17T12:40:30,812 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d:
2024-12-17T12:40:30,812 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/C, priority=12, startTime=1734439230316; duration=0sec
2024-12-17T12:40:30,812 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-17T12:40:30,812 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:C
2024-12-17T12:40:30,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:40:30,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439290813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:40:30,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:40:30,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439290814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:40:30,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-17T12:40:30,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439290817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372
2024-12-17T12:40:30,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-17T12:40:30,854 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412170de20ed96097471f9dac55d17511ec92_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412170de20ed96097471f9dac55d17511ec92_05f19a912451e6e90726d599fdf98d6d
2024-12-17T12:40:30,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/2fa06d247c624c4ea8086a9bbe00c61d, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d]
2024-12-17T12:40:30,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/2fa06d247c624c4ea8086a9bbe00c61d is 175, key is test_row_0/A:col10/1734439228651/Put/seqid=0
2024-12-17T12:40:30,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742365_1541 (size=22461)
2024-12-17T12:40:30,862 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=221, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/2fa06d247c624c4ea8086a9bbe00c61d
2024-12-17T12:40:30,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/d72a424e8a89451cb942f7f54f0efe78 is 50, key is test_row_0/B:col10/1734439228651/Put/seqid=0 2024-12-17T12:40:30,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742366_1542 (size=9757) 2024-12-17T12:40:30,878 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/d72a424e8a89451cb942f7f54f0efe78 2024-12-17T12:40:30,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/55ecf9796c274f95a53c3498e9f95e4d is 50, key is test_row_0/C:col10/1734439228651/Put/seqid=0 2024-12-17T12:40:30,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742367_1543 (size=9757) 2024-12-17T12:40:30,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:30,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:30,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439290918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:30,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439290918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:30,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:30,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439290920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-17T12:40:31,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439291121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439291121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439291122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,293 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/55ecf9796c274f95a53c3498e9f95e4d 2024-12-17T12:40:31,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/2fa06d247c624c4ea8086a9bbe00c61d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/2fa06d247c624c4ea8086a9bbe00c61d 2024-12-17T12:40:31,298 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/2fa06d247c624c4ea8086a9bbe00c61d, entries=100, sequenceid=221, filesize=21.9 K 2024-12-17T12:40:31,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/d72a424e8a89451cb942f7f54f0efe78 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d72a424e8a89451cb942f7f54f0efe78 2024-12-17T12:40:31,304 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d72a424e8a89451cb942f7f54f0efe78, entries=100, sequenceid=221, filesize=9.5 K 2024-12-17T12:40:31,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/55ecf9796c274f95a53c3498e9f95e4d as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/55ecf9796c274f95a53c3498e9f95e4d 2024-12-17T12:40:31,307 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/55ecf9796c274f95a53c3498e9f95e4d, entries=100, sequenceid=221, filesize=9.5 K 2024-12-17T12:40:31,308 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 05f19a912451e6e90726d599fdf98d6d in 883ms, sequenceid=221, compaction requested=false 2024-12-17T12:40:31,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:31,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:31,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-17T12:40:31,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-17T12:40:31,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-12-17T12:40:31,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4060 sec 2024-12-17T12:40:31,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 2.4090 sec 2024-12-17T12:40:31,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:31,424 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-17T12:40:31,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:31,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:31,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:31,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:31,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:31,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:31,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217e25e6e8fe82e4665ae9491972a9fb4c7_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439231423/Put/seqid=0 2024-12-17T12:40:31,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439291429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439291429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439291429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742368_1544 (size=12304) 2024-12-17T12:40:31,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439291531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439291531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
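The same connections keep reappearing with new callIds and later deadlines (callId 125, 127, 129, ... from 172.17.0.2:43926/43936/43948), i.e. the writer threads back off and resubmit each rejected Mutate. The standard HBase client handles this retrying internally; the sketch below is only an explicit illustration of the back-off idea, not the client's actual retry path (in a real client the exception may also surface wrapped after retries are exhausted):

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    // Illustrative only: retry a put with exponential back-off when the region
    // reports that its memstore is over the blocking limit.
    public final class BackoffPutSketch {
        public static void putWithBackoff(Table table, Put put, int maxAttempts)
                throws IOException, InterruptedException {
            long pauseMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    if (attempt >= maxAttempts) {
                        throw e;                        // give up after maxAttempts tries
                    }
                    Thread.sleep(pauseMs);              // let the flush drain the memstore
                    pauseMs = Math.min(pauseMs * 2, 10_000);
                }
            }
        }
    }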
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439291531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439291734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439291734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:31,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439291734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:31,834 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:31,837 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217e25e6e8fe82e4665ae9491972a9fb4c7_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e25e6e8fe82e4665ae9491972a9fb4c7_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:31,837 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/947c77633dd8485e81c3f5213ccbf057, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:31,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/947c77633dd8485e81c3f5213ccbf057 is 175, key is test_row_0/A:col10/1734439231423/Put/seqid=0 2024-12-17T12:40:31,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742369_1545 (size=31105) 2024-12-17T12:40:32,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:32,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439292037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:32,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439292038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:32,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439292038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:32,241 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/947c77633dd8485e81c3f5213ccbf057 2024-12-17T12:40:32,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/52c1cae2dad24879970ffcfd38a54f94 is 50, key is test_row_0/B:col10/1734439231423/Put/seqid=0 2024-12-17T12:40:32,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742370_1546 (size=12151) 2024-12-17T12:40:32,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:32,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439292542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:32,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:32,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:32,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439292542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:32,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439292542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:32,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/52c1cae2dad24879970ffcfd38a54f94 2024-12-17T12:40:32,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/744cc97aee7845bc813ec59bad9c7f04 is 50, key is test_row_0/C:col10/1734439231423/Put/seqid=0 2024-12-17T12:40:32,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742371_1547 (size=12151) 2024-12-17T12:40:33,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-17T12:40:33,006 INFO [Thread-2164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-12-17T12:40:33,006 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:40:33,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-12-17T12:40:33,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-17T12:40:33,008 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:40:33,008 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): 
pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:40:33,008 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:40:33,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/744cc97aee7845bc813ec59bad9c7f04 2024-12-17T12:40:33,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/947c77633dd8485e81c3f5213ccbf057 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/947c77633dd8485e81c3f5213ccbf057 2024-12-17T12:40:33,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/947c77633dd8485e81c3f5213ccbf057, entries=150, sequenceid=253, filesize=30.4 K 2024-12-17T12:40:33,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/52c1cae2dad24879970ffcfd38a54f94 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/52c1cae2dad24879970ffcfd38a54f94 2024-12-17T12:40:33,066 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/52c1cae2dad24879970ffcfd38a54f94, entries=150, sequenceid=253, filesize=11.9 K 2024-12-17T12:40:33,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/744cc97aee7845bc813ec59bad9c7f04 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/744cc97aee7845bc813ec59bad9c7f04 2024-12-17T12:40:33,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/744cc97aee7845bc813ec59bad9c7f04, entries=150, sequenceid=253, filesize=11.9 K 2024-12-17T12:40:33,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=40.25 KB/41220 for 05f19a912451e6e90726d599fdf98d6d in 1645ms, sequenceid=253, compaction requested=true 2024-12-17T12:40:33,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal 
for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:33,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:40:33,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:33,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:40:33,069 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:33,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:33,069 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:33,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:40:33,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:33,070 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34537 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:33,070 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/B is initiating minor compaction (all files) 2024-12-17T12:40:33,070 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/B in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:33,070 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/feba350231e747018ecaf68a046821b0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d72a424e8a89451cb942f7f54f0efe78, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/52c1cae2dad24879970ffcfd38a54f94] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=33.7 K 2024-12-17T12:40:33,070 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:33,070 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/A is initiating minor compaction (all files) 2024-12-17T12:40:33,070 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/A in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:33,070 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c962bec30aab4ef3b3364c209b1df0a8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/2fa06d247c624c4ea8086a9bbe00c61d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/947c77633dd8485e81c3f5213ccbf057] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=83.2 K 2024-12-17T12:40:33,070 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:33,070 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c962bec30aab4ef3b3364c209b1df0a8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/2fa06d247c624c4ea8086a9bbe00c61d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/947c77633dd8485e81c3f5213ccbf057] 2024-12-17T12:40:33,071 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting feba350231e747018ecaf68a046821b0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734439227534 2024-12-17T12:40:33,071 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting c962bec30aab4ef3b3364c209b1df0a8, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734439227534 2024-12-17T12:40:33,071 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d72a424e8a89451cb942f7f54f0efe78, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734439228651 2024-12-17T12:40:33,071 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fa06d247c624c4ea8086a9bbe00c61d, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734439228651 2024-12-17T12:40:33,071 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 52c1cae2dad24879970ffcfd38a54f94, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439230812 2024-12-17T12:40:33,071 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 947c77633dd8485e81c3f5213ccbf057, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439230812 2024-12-17T12:40:33,075 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:33,076 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#B#compaction#455 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:33,076 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/05e92b2eda254d29b1b42bd12e48f9dc is 50, key is test_row_0/B:col10/1734439231423/Put/seqid=0 2024-12-17T12:40:33,077 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241217d64f274781c44c7e9e6c0c81712f6c5c_05f19a912451e6e90726d599fdf98d6d store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:33,079 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241217d64f274781c44c7e9e6c0c81712f6c5c_05f19a912451e6e90726d599fdf98d6d, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:33,079 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217d64f274781c44c7e9e6c0c81712f6c5c_05f19a912451e6e90726d599fdf98d6d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:33,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742372_1548 (size=12731) 2024-12-17T12:40:33,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742373_1549 (size=4469) 2024-12-17T12:40:33,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-17T12:40:33,159 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,159 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-17T12:40:33,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:33,160 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-17T12:40:33,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:33,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:33,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:33,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:33,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:33,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:33,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217e529123ceeda42759a110c7883bf0595_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439231425/Put/seqid=0 2024-12-17T12:40:33,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742374_1550 (size=12454) 2024-12-17T12:40:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-17T12:40:33,488 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#A#compaction#456 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:33,489 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/e316fd97d279433c828037a6fcea1377 is 175, key is test_row_0/A:col10/1734439231423/Put/seqid=0 2024-12-17T12:40:33,489 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/05e92b2eda254d29b1b42bd12e48f9dc as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/05e92b2eda254d29b1b42bd12e48f9dc 2024-12-17T12:40:33,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742375_1551 (size=31685) 2024-12-17T12:40:33,493 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/B of 05f19a912451e6e90726d599fdf98d6d into 05e92b2eda254d29b1b42bd12e48f9dc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:33,493 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:33,493 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/B, priority=13, startTime=1734439233069; duration=0sec 2024-12-17T12:40:33,493 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:33,493 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:B 2024-12-17T12:40:33,493 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:33,494 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34537 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:33,494 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/C is initiating minor compaction (all files) 2024-12-17T12:40:33,495 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/C in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:33,495 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/cc71621da8dd430895e7f72e8b06206d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/55ecf9796c274f95a53c3498e9f95e4d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/744cc97aee7845bc813ec59bad9c7f04] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=33.7 K 2024-12-17T12:40:33,495 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting cc71621da8dd430895e7f72e8b06206d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734439227534 2024-12-17T12:40:33,495 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 55ecf9796c274f95a53c3498e9f95e4d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734439228651 2024-12-17T12:40:33,495 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/e316fd97d279433c828037a6fcea1377 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e316fd97d279433c828037a6fcea1377 2024-12-17T12:40:33,495 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 744cc97aee7845bc813ec59bad9c7f04, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439230812 2024-12-17T12:40:33,499 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/A of 05f19a912451e6e90726d599fdf98d6d into e316fd97d279433c828037a6fcea1377(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:33,499 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:33,499 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/A, priority=13, startTime=1734439233069; duration=0sec 2024-12-17T12:40:33,499 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:33,499 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:A 2024-12-17T12:40:33,500 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#C#compaction#458 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:33,501 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/29724745054743edb1d67fe2e5fcc288 is 50, key is test_row_0/C:col10/1734439231423/Put/seqid=0 2024-12-17T12:40:33,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742376_1552 (size=12731) 2024-12-17T12:40:33,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:33,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:33,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439293570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439293570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439293571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:33,581 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217e529123ceeda42759a110c7883bf0595_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e529123ceeda42759a110c7883bf0595_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:33,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/70f4b71d220840059a76b2712cda91f0, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:33,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/70f4b71d220840059a76b2712cda91f0 is 175, key is test_row_0/A:col10/1734439231425/Put/seqid=0 2024-12-17T12:40:33,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742377_1553 (size=31255) 2024-12-17T12:40:33,585 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=262, memsize=13.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/70f4b71d220840059a76b2712cda91f0 2024-12-17T12:40:33,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/7c268eb4a2e44ef2bb307cbc42e01b90 is 50, key is test_row_0/B:col10/1734439231425/Put/seqid=0 2024-12-17T12:40:33,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742378_1554 (size=12301) 2024-12-17T12:40:33,594 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/7c268eb4a2e44ef2bb307cbc42e01b90 2024-12-17T12:40:33,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/7b14fc95c3e0481ea2020d6a9e1ea7e2 is 50, key is test_row_0/C:col10/1734439231425/Put/seqid=0 2024-12-17T12:40:33,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742379_1555 (size=12301) 2024-12-17T12:40:33,603 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/7b14fc95c3e0481ea2020d6a9e1ea7e2 2024-12-17T12:40:33,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/70f4b71d220840059a76b2712cda91f0 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/70f4b71d220840059a76b2712cda91f0 2024-12-17T12:40:33,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-17T12:40:33,610 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/70f4b71d220840059a76b2712cda91f0, entries=150, sequenceid=262, filesize=30.5 K 2024-12-17T12:40:33,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/7c268eb4a2e44ef2bb307cbc42e01b90 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/7c268eb4a2e44ef2bb307cbc42e01b90 2024-12-17T12:40:33,619 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/7c268eb4a2e44ef2bb307cbc42e01b90, entries=150, sequenceid=262, filesize=12.0 K 2024-12-17T12:40:33,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/7b14fc95c3e0481ea2020d6a9e1ea7e2 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7b14fc95c3e0481ea2020d6a9e1ea7e2 2024-12-17T12:40:33,623 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7b14fc95c3e0481ea2020d6a9e1ea7e2, entries=150, sequenceid=262, filesize=12.0 K 2024-12-17T12:40:33,623 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 05f19a912451e6e90726d599fdf98d6d in 463ms, sequenceid=262, compaction requested=false 2024-12-17T12:40:33,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:33,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:33,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-17T12:40:33,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-12-17T12:40:33,625 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-17T12:40:33,625 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 616 msec 2024-12-17T12:40:33,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 618 msec 2024-12-17T12:40:33,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:33,676 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-17T12:40:33,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:33,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:33,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:33,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:33,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:33,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:33,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439293680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439293680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439293680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412177f9921ca096f4cefb6a6b738138b4f26_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439233570/Put/seqid=0 2024-12-17T12:40:33,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742380_1556 (size=12454) 2024-12-17T12:40:33,697 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:33,699 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412177f9921ca096f4cefb6a6b738138b4f26_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412177f9921ca096f4cefb6a6b738138b4f26_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:33,699 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/a0b212dd60b54b0687201f768ef9f3f8, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:33,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/a0b212dd60b54b0687201f768ef9f3f8 is 175, key is test_row_0/A:col10/1734439233570/Put/seqid=0 2024-12-17T12:40:33,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742381_1557 (size=31255) 2024-12-17T12:40:33,702 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=294, memsize=60.4 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/a0b212dd60b54b0687201f768ef9f3f8 2024-12-17T12:40:33,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/19ad7db4319849abb37f411d3c8b1dbd is 50, key is test_row_0/B:col10/1734439233570/Put/seqid=0 2024-12-17T12:40:33,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742382_1558 (size=12301) 2024-12-17T12:40:33,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439293783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439293783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439293783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,916 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/29724745054743edb1d67fe2e5fcc288 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/29724745054743edb1d67fe2e5fcc288 2024-12-17T12:40:33,923 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/C of 05f19a912451e6e90726d599fdf98d6d into 29724745054743edb1d67fe2e5fcc288(size=12.4 K), total size for store is 24.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:33,923 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:33,923 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/C, priority=13, startTime=1734439233069; duration=0sec 2024-12-17T12:40:33,923 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:33,923 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:C 2024-12-17T12:40:33,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439293984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439293984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:33,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:33,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439293985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-17T12:40:34,110 INFO [Thread-2164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-17T12:40:34,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/19ad7db4319849abb37f411d3c8b1dbd 2024-12-17T12:40:34,111 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-17T12:40:34,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-12-17T12:40:34,112 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-17T12:40:34,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-17T12:40:34,112 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-17T12:40:34,112 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-17T12:40:34,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/302884fc3fe54460ad3d456cbc226ae1 is 50, key is test_row_0/C:col10/1734439233570/Put/seqid=0 2024-12-17T12:40:34,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742383_1559 (size=12301) 2024-12-17T12:40:34,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-17T12:40:34,266 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-17T12:40:34,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:34,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:34,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:34,266 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:34,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:34,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:34,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:34,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439294287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:34,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439294287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:34,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439294288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-17T12:40:34,417 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-17T12:40:34,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:34,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:34,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:34,417 ERROR [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-17T12:40:34,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:34,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-17T12:40:34,519 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/302884fc3fe54460ad3d456cbc226ae1 2024-12-17T12:40:34,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/a0b212dd60b54b0687201f768ef9f3f8 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a0b212dd60b54b0687201f768ef9f3f8 2024-12-17T12:40:34,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a0b212dd60b54b0687201f768ef9f3f8, entries=150, sequenceid=294, filesize=30.5 K 2024-12-17T12:40:34,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/19ad7db4319849abb37f411d3c8b1dbd as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/19ad7db4319849abb37f411d3c8b1dbd 2024-12-17T12:40:34,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/19ad7db4319849abb37f411d3c8b1dbd, entries=150, sequenceid=294, filesize=12.0 K 2024-12-17T12:40:34,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/302884fc3fe54460ad3d456cbc226ae1 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/302884fc3fe54460ad3d456cbc226ae1 2024-12-17T12:40:34,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/302884fc3fe54460ad3d456cbc226ae1, entries=150, sequenceid=294, filesize=12.0 K 2024-12-17T12:40:34,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 05f19a912451e6e90726d599fdf98d6d in 857ms, sequenceid=294, compaction requested=true 2024-12-17T12:40:34,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:34,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
05f19a912451e6e90726d599fdf98d6d:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:40:34,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:34,532 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:34,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:40:34,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:34,533 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:34,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:40:34,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:34,533 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:34,533 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:34,533 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/A is initiating minor compaction (all files) 2024-12-17T12:40:34,533 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/B is initiating minor compaction (all files) 2024-12-17T12:40:34,533 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/A in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:34,533 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/B in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:34,533 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/05e92b2eda254d29b1b42bd12e48f9dc, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/7c268eb4a2e44ef2bb307cbc42e01b90, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/19ad7db4319849abb37f411d3c8b1dbd] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=36.5 K 2024-12-17T12:40:34,533 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e316fd97d279433c828037a6fcea1377, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/70f4b71d220840059a76b2712cda91f0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a0b212dd60b54b0687201f768ef9f3f8] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=92.0 K 2024-12-17T12:40:34,534 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:34,534 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e316fd97d279433c828037a6fcea1377, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/70f4b71d220840059a76b2712cda91f0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a0b212dd60b54b0687201f768ef9f3f8] 2024-12-17T12:40:34,534 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 05e92b2eda254d29b1b42bd12e48f9dc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439230812 2024-12-17T12:40:34,534 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting e316fd97d279433c828037a6fcea1377, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439230812 2024-12-17T12:40:34,534 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c268eb4a2e44ef2bb307cbc42e01b90, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734439231425 2024-12-17T12:40:34,534 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70f4b71d220840059a76b2712cda91f0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734439231425 2024-12-17T12:40:34,534 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 19ad7db4319849abb37f411d3c8b1dbd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439233570 2024-12-17T12:40:34,534 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0b212dd60b54b0687201f768ef9f3f8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439233570 2024-12-17T12:40:34,538 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:34,540 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#B#compaction#464 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:34,540 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412172d1141f15be44a2688406abd0c7c5014_05f19a912451e6e90726d599fdf98d6d store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:34,540 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/e1ab546a645d4c43be66476045b17d70 is 50, key is test_row_0/B:col10/1734439233570/Put/seqid=0 2024-12-17T12:40:34,542 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412172d1141f15be44a2688406abd0c7c5014_05f19a912451e6e90726d599fdf98d6d, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:34,542 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412172d1141f15be44a2688406abd0c7c5014_05f19a912451e6e90726d599fdf98d6d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:34,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742384_1560 (size=12983) 2024-12-17T12:40:34,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742385_1561 (size=4469) 2024-12-17T12:40:34,569 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36491 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-17T12:40:34,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:34,569 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-17T12:40:34,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:34,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:34,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:34,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:34,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:34,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:34,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217c67e451afdb1410eb4cbee3501ae1197_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439233678/Put/seqid=0 2024-12-17T12:40:34,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742386_1562 (size=12454) 2024-12-17T12:40:34,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-17T12:40:34,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:34,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. as already flushing 2024-12-17T12:40:34,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439294815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439294815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:34,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439294817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:34,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439294919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:34,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439294919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439294920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:34,946 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#A#compaction#465 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:34,947 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/16304fedc25c4f67b8196f16b9e92da9 is 175, key is test_row_0/A:col10/1734439233570/Put/seqid=0 2024-12-17T12:40:34,949 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/e1ab546a645d4c43be66476045b17d70 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/e1ab546a645d4c43be66476045b17d70 2024-12-17T12:40:34,952 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/B of 05f19a912451e6e90726d599fdf98d6d into e1ab546a645d4c43be66476045b17d70(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:34,952 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:34,952 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/B, priority=13, startTime=1734439234532; duration=0sec 2024-12-17T12:40:34,952 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:34,952 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:B 2024-12-17T12:40:34,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742387_1563 (size=31937) 2024-12-17T12:40:34,953 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:34,954 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:34,954 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/C is initiating minor compaction (all files) 2024-12-17T12:40:34,954 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/C in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:34,954 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/29724745054743edb1d67fe2e5fcc288, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7b14fc95c3e0481ea2020d6a9e1ea7e2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/302884fc3fe54460ad3d456cbc226ae1] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=36.5 K 2024-12-17T12:40:34,954 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 29724745054743edb1d67fe2e5fcc288, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734439230812 2024-12-17T12:40:34,954 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b14fc95c3e0481ea2020d6a9e1ea7e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734439231425 2024-12-17T12:40:34,955 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 302884fc3fe54460ad3d456cbc226ae1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439233570 2024-12-17T12:40:34,959 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#C#compaction#467 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:34,959 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/f32ce6d9f5fa480fbc1e9f13a2de514f is 50, key is test_row_0/C:col10/1734439233570/Put/seqid=0 2024-12-17T12:40:34,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742388_1564 (size=12983) 2024-12-17T12:40:34,966 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/f32ce6d9f5fa480fbc1e9f13a2de514f as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/f32ce6d9f5fa480fbc1e9f13a2de514f 2024-12-17T12:40:34,969 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/C of 05f19a912451e6e90726d599fdf98d6d into f32ce6d9f5fa480fbc1e9f13a2de514f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:34,969 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:34,969 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/C, priority=13, startTime=1734439234533; duration=0sec 2024-12-17T12:40:34,969 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:34,969 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:C 2024-12-17T12:40:34,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:34,980 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217c67e451afdb1410eb4cbee3501ae1197_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217c67e451afdb1410eb4cbee3501ae1197_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:34,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/a2754d22c95e4f93b1b1f83bc8763103, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:34,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/a2754d22c95e4f93b1b1f83bc8763103 is 175, key is test_row_0/A:col10/1734439233678/Put/seqid=0 2024-12-17T12:40:34,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742389_1565 (size=31255) 2024-12-17T12:40:35,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:35,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439295121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:35,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:35,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439295122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:35,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:35,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439295122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:35,168 DEBUG [Thread-2173 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1df61dc9 to 127.0.0.1:59557 2024-12-17T12:40:35,168 DEBUG [Thread-2173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:35,169 DEBUG [Thread-2169 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x521aad6f to 127.0.0.1:59557 2024-12-17T12:40:35,169 DEBUG [Thread-2169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:35,170 DEBUG [Thread-2171 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f5b2180 to 127.0.0.1:59557 2024-12-17T12:40:35,170 DEBUG [Thread-2171 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:35,170 DEBUG [Thread-2165 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x605827c9 to 127.0.0.1:59557 2024-12-17T12:40:35,170 DEBUG [Thread-2165 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:35,171 DEBUG [Thread-2167 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3677bd4f to 127.0.0.1:59557 2024-12-17T12:40:35,171 DEBUG [Thread-2167 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:35,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-17T12:40:35,362 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/16304fedc25c4f67b8196f16b9e92da9 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/16304fedc25c4f67b8196f16b9e92da9 2024-12-17T12:40:35,368 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/A of 05f19a912451e6e90726d599fdf98d6d into 16304fedc25c4f67b8196f16b9e92da9(size=31.2 K), total size for store is 31.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:35,368 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:35,368 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/A, priority=13, startTime=1734439234532; duration=0sec 2024-12-17T12:40:35,368 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:35,368 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:A 2024-12-17T12:40:35,385 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=301, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/a2754d22c95e4f93b1b1f83bc8763103 2024-12-17T12:40:35,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/d7a5237bcf4a482da23488e1ddb77f4c is 50, key is test_row_0/B:col10/1734439233678/Put/seqid=0 2024-12-17T12:40:35,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742390_1566 (size=12301) 2024-12-17T12:40:35,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:35,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439295425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:35,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:35,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439295426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:35,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:35,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439295427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:35,794 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/d7a5237bcf4a482da23488e1ddb77f4c 2024-12-17T12:40:35,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/3bc9d3d558384e12a1dbfc2c5e62b91e is 50, key is test_row_0/C:col10/1734439233678/Put/seqid=0 2024-12-17T12:40:35,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742391_1567 (size=12301) 2024-12-17T12:40:35,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:35,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43926 deadline: 1734439295929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:35,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:35,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43936 deadline: 1734439295930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:35,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:35,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439295931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:36,213 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/3bc9d3d558384e12a1dbfc2c5e62b91e 2024-12-17T12:40:36,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-17T12:40:36,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/a2754d22c95e4f93b1b1f83bc8763103 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a2754d22c95e4f93b1b1f83bc8763103 2024-12-17T12:40:36,225 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a2754d22c95e4f93b1b1f83bc8763103, entries=150, sequenceid=301, filesize=30.5 K 2024-12-17T12:40:36,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/d7a5237bcf4a482da23488e1ddb77f4c as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d7a5237bcf4a482da23488e1ddb77f4c 2024-12-17T12:40:36,227 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d7a5237bcf4a482da23488e1ddb77f4c, entries=150, sequenceid=301, filesize=12.0 K 2024-12-17T12:40:36,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/3bc9d3d558384e12a1dbfc2c5e62b91e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/3bc9d3d558384e12a1dbfc2c5e62b91e 2024-12-17T12:40:36,230 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/3bc9d3d558384e12a1dbfc2c5e62b91e, entries=150, sequenceid=301, filesize=12.0 K 2024-12-17T12:40:36,230 INFO [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 05f19a912451e6e90726d599fdf98d6d in 1661ms, sequenceid=301, compaction requested=false 2024-12-17T12:40:36,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:36,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:36,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/681c08bfdbdf:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-12-17T12:40:36,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-12-17T12:40:36,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-12-17T12:40:36,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1190 sec 2024-12-17T12:40:36,233 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 2.1210 sec 2024-12-17T12:40:36,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36491 {}] regionserver.HRegion(8581): Flush requested on 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:36,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-12-17T12:40:36,724 DEBUG [Thread-2160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a0e9c8f to 127.0.0.1:59557 2024-12-17T12:40:36,724 DEBUG [Thread-2160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:36,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:36,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:36,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:36,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:36,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:36,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:36,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121707852c86756c49f7a8eefa3b59d968f7_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439234816/Put/seqid=0 2024-12-17T12:40:36,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742392_1568 (size=12454) 2024-12-17T12:40:36,886 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-17T12:40:36,938 DEBUG [Thread-2162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d68f787 to 127.0.0.1:59557 2024-12-17T12:40:36,938 DEBUG [Thread-2154 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e9ae050 to 127.0.0.1:59557 2024-12-17T12:40:36,938 DEBUG [Thread-2154 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:36,938 DEBUG [Thread-2162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:36,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-17T12:40:36,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36491 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1734439296939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:37,138 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:37,146 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121707852c86756c49f7a8eefa3b59d968f7_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121707852c86756c49f7a8eefa3b59d968f7_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:37,147 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/e42eaa73bcca4444b3c3c1632582b800, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:37,148 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/e42eaa73bcca4444b3c3c1632582b800 is 175, key is test_row_0/A:col10/1734439234816/Put/seqid=0 2024-12-17T12:40:37,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742393_1569 (size=31255) 2024-12-17T12:40:37,554 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=335, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/e42eaa73bcca4444b3c3c1632582b800 2024-12-17T12:40:37,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/0643d9d3496a45f1b38022c64de73677 is 50, key is test_row_0/B:col10/1734439234816/Put/seqid=0 2024-12-17T12:40:37,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742394_1570 (size=12301) 2024-12-17T12:40:37,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/0643d9d3496a45f1b38022c64de73677 2024-12-17T12:40:37,972 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/7e377d256a39493292a83a9e74f0dfb3 is 50, key is test_row_0/C:col10/1734439234816/Put/seqid=0 2024-12-17T12:40:37,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742395_1571 (size=12301) 2024-12-17T12:40:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-17T12:40:38,219 INFO [Thread-2164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-12-17T12:40:38,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/7e377d256a39493292a83a9e74f0dfb3 2024-12-17T12:40:38,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/e42eaa73bcca4444b3c3c1632582b800 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e42eaa73bcca4444b3c3c1632582b800 2024-12-17T12:40:38,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e42eaa73bcca4444b3c3c1632582b800, entries=150, sequenceid=335, filesize=30.5 K 2024-12-17T12:40:38,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/0643d9d3496a45f1b38022c64de73677 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/0643d9d3496a45f1b38022c64de73677 2024-12-17T12:40:38,394 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/0643d9d3496a45f1b38022c64de73677, entries=150, sequenceid=335, filesize=12.0 K 2024-12-17T12:40:38,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/7e377d256a39493292a83a9e74f0dfb3 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e377d256a39493292a83a9e74f0dfb3 2024-12-17T12:40:38,397 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e377d256a39493292a83a9e74f0dfb3, entries=150, sequenceid=335, filesize=12.0 K 2024-12-17T12:40:38,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for 05f19a912451e6e90726d599fdf98d6d in 1673ms, sequenceid=335, compaction requested=true 2024-12-17T12:40:38,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:38,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:A, priority=-2147483648, current under compaction store size is 1 2024-12-17T12:40:38,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:38,397 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:38,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:B, priority=-2147483648, current under compaction store size is 2 2024-12-17T12:40:38,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:38,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05f19a912451e6e90726d599fdf98d6d:C, priority=-2147483648, current under compaction store size is 3 2024-12-17T12:40:38,398 DEBUG 
[RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:38,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:38,398 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:38,398 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:38,398 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/A is initiating minor compaction (all files) 2024-12-17T12:40:38,398 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/B is initiating minor compaction (all files) 2024-12-17T12:40:38,398 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/A in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:38,398 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/B in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:38,398 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/16304fedc25c4f67b8196f16b9e92da9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a2754d22c95e4f93b1b1f83bc8763103, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e42eaa73bcca4444b3c3c1632582b800] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=92.2 K 2024-12-17T12:40:38,398 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:38,398 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
files: [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/16304fedc25c4f67b8196f16b9e92da9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a2754d22c95e4f93b1b1f83bc8763103, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e42eaa73bcca4444b3c3c1632582b800] 2024-12-17T12:40:38,398 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/e1ab546a645d4c43be66476045b17d70, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d7a5237bcf4a482da23488e1ddb77f4c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/0643d9d3496a45f1b38022c64de73677] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=36.7 K 2024-12-17T12:40:38,398 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16304fedc25c4f67b8196f16b9e92da9, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439233570 2024-12-17T12:40:38,399 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting e1ab546a645d4c43be66476045b17d70, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439233570 2024-12-17T12:40:38,399 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2754d22c95e4f93b1b1f83bc8763103, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1734439233678 2024-12-17T12:40:38,399 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting d7a5237bcf4a482da23488e1ddb77f4c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1734439233678 2024-12-17T12:40:38,399 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] compactions.Compactor(224): Compacting e42eaa73bcca4444b3c3c1632582b800, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1734439234805 2024-12-17T12:40:38,399 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 0643d9d3496a45f1b38022c64de73677, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1734439234805 2024-12-17T12:40:38,404 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:38,405 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer 
created=d41d8cd98f00b204e9800998ecf8427e20241217a1e741d0af454f3fa361b630ad7f1b41_05f19a912451e6e90726d599fdf98d6d store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:38,405 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#B#compaction#473 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:38,405 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/1e1f776ea960429fbe9c4b79e60921cf is 50, key is test_row_0/B:col10/1734439234816/Put/seqid=0 2024-12-17T12:40:38,406 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241217a1e741d0af454f3fa361b630ad7f1b41_05f19a912451e6e90726d599fdf98d6d, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:38,406 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241217a1e741d0af454f3fa361b630ad7f1b41_05f19a912451e6e90726d599fdf98d6d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:38,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742396_1572 (size=13085) 2024-12-17T12:40:38,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742397_1573 (size=4469) 2024-12-17T12:40:38,816 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#A#compaction#474 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:38,817 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/8767e8af636b4055922f63d7b19f0f0e is 175, key is test_row_0/A:col10/1734439234816/Put/seqid=0 2024-12-17T12:40:38,817 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/1e1f776ea960429fbe9c4b79e60921cf as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/1e1f776ea960429fbe9c4b79e60921cf 2024-12-17T12:40:38,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742398_1574 (size=32039) 2024-12-17T12:40:38,824 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/B of 05f19a912451e6e90726d599fdf98d6d into 1e1f776ea960429fbe9c4b79e60921cf(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:38,824 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:38,824 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/B, priority=13, startTime=1734439238397; duration=0sec 2024-12-17T12:40:38,824 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-17T12:40:38,824 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:B 2024-12-17T12:40:38,824 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-17T12:40:38,826 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-17T12:40:38,826 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1540): 05f19a912451e6e90726d599fdf98d6d/C is initiating minor compaction (all files) 2024-12-17T12:40:38,826 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05f19a912451e6e90726d599fdf98d6d/C in TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
2024-12-17T12:40:38,826 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/f32ce6d9f5fa480fbc1e9f13a2de514f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/3bc9d3d558384e12a1dbfc2c5e62b91e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e377d256a39493292a83a9e74f0dfb3] into tmpdir=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp, totalSize=36.7 K 2024-12-17T12:40:38,826 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting f32ce6d9f5fa480fbc1e9f13a2de514f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734439233570 2024-12-17T12:40:38,827 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bc9d3d558384e12a1dbfc2c5e62b91e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1734439233678 2024-12-17T12:40:38,827 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e377d256a39493292a83a9e74f0dfb3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1734439234805 2024-12-17T12:40:38,835 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05f19a912451e6e90726d599fdf98d6d#C#compaction#475 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-17T12:40:38,836 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/50bee73dab3c4d0aa3c276a187c75040 is 50, key is test_row_0/C:col10/1734439234816/Put/seqid=0 2024-12-17T12:40:38,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742399_1575 (size=13085) 2024-12-17T12:40:38,961 DEBUG [Thread-2156 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2fef31f8 to 127.0.0.1:59557 2024-12-17T12:40:38,961 DEBUG [Thread-2156 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:39,232 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/8767e8af636b4055922f63d7b19f0f0e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/8767e8af636b4055922f63d7b19f0f0e 2024-12-17T12:40:39,238 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/A of 05f19a912451e6e90726d599fdf98d6d into 8767e8af636b4055922f63d7b19f0f0e(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-17T12:40:39,238 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:39,238 INFO [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/A, priority=13, startTime=1734439238397; duration=0sec 2024-12-17T12:40:39,238 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:39,238 DEBUG [RS:0;681c08bfdbdf:36491-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:A 2024-12-17T12:40:39,245 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/50bee73dab3c4d0aa3c276a187c75040 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/50bee73dab3c4d0aa3c276a187c75040 2024-12-17T12:40:39,251 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05f19a912451e6e90726d599fdf98d6d/C of 05f19a912451e6e90726d599fdf98d6d into 50bee73dab3c4d0aa3c276a187c75040(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-17T12:40:39,251 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:39,251 INFO [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d., storeName=05f19a912451e6e90726d599fdf98d6d/C, priority=13, startTime=1734439238398; duration=0sec 2024-12-17T12:40:39,251 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-17T12:40:39,251 DEBUG [RS:0;681c08bfdbdf:36491-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05f19a912451e6e90726d599fdf98d6d:C 2024-12-17T12:40:39,768 DEBUG [Thread-2158 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0eb04aeb to 127.0.0.1:59557 2024-12-17T12:40:39,768 DEBUG [Thread-2158 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:39,769 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-17T12:40:39,769 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 72 2024-12-17T12:40:39,769 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-12-17T12:40:39,769 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 35 2024-12-17T12:40:39,769 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 15 2024-12-17T12:40:39,769 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-12-17T12:40:39,769 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-17T12:40:39,769 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8854 2024-12-17T12:40:39,769 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8593 2024-12-17T12:40:39,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8053 2024-12-17T12:40:39,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8834 2024-12-17T12:40:39,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8568 2024-12-17T12:40:39,770 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-17T12:40:39,770 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-17T12:40:39,770 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34cb3991 to 127.0.0.1:59557 2024-12-17T12:40:39,770 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:39,771 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-17T12:40:39,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-17T12:40:39,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:39,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-17T12:40:39,775 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439239775"}]},"ts":"1734439239775"} 2024-12-17T12:40:39,776 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-17T12:40:39,795 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-17T12:40:39,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-17T12:40:39,798 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=05f19a912451e6e90726d599fdf98d6d, UNASSIGN}] 2024-12-17T12:40:39,799 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=161, ppid=160, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=05f19a912451e6e90726d599fdf98d6d, UNASSIGN 2024-12-17T12:40:39,800 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=161 updating hbase:meta row=05f19a912451e6e90726d599fdf98d6d, regionState=CLOSING, regionLocation=681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:39,802 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-17T12:40:39,802 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; CloseRegionProcedure 05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372}] 2024-12-17T12:40:39,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-17T12:40:39,954 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:39,955 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] handler.UnassignRegionHandler(124): Close 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:39,956 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-17T12:40:39,956 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegion(1681): Closing 05f19a912451e6e90726d599fdf98d6d, disabling compactions & flushes 2024-12-17T12:40:39,956 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:39,956 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:39,956 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 
after waiting 0 ms 2024-12-17T12:40:39,956 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:39,956 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegion(2837): Flushing 05f19a912451e6e90726d599fdf98d6d 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-17T12:40:39,957 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=A 2024-12-17T12:40:39,957 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:39,957 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=B 2024-12-17T12:40:39,957 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:39,958 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05f19a912451e6e90726d599fdf98d6d, store=C 2024-12-17T12:40:39,958 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-17T12:40:39,969 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121742725b33117c4dcfaa364362eb302e54_05f19a912451e6e90726d599fdf98d6d is 50, key is test_row_0/A:col10/1734439238958/Put/seqid=0 2024-12-17T12:40:39,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742400_1576 (size=12454) 2024-12-17T12:40:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-17T12:40:40,374 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-17T12:40:40,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-17T12:40:40,380 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121742725b33117c4dcfaa364362eb302e54_05f19a912451e6e90726d599fdf98d6d to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121742725b33117c4dcfaa364362eb302e54_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:40,381 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/3befb08852334911addc41b4ee33c7fb, store: [table=TestAcidGuarantees family=A region=05f19a912451e6e90726d599fdf98d6d] 2024-12-17T12:40:40,382 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/3befb08852334911addc41b4ee33c7fb is 175, key is test_row_0/A:col10/1734439238958/Put/seqid=0 2024-12-17T12:40:40,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742401_1577 (size=31255) 2024-12-17T12:40:40,788 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=345, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/3befb08852334911addc41b4ee33c7fb 2024-12-17T12:40:40,799 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/3fcc125715764ac397bf0c5b68e63455 is 50, key is test_row_0/B:col10/1734439238958/Put/seqid=0 2024-12-17T12:40:40,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742402_1578 (size=12301) 2024-12-17T12:40:40,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-17T12:40:41,204 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/3fcc125715764ac397bf0c5b68e63455 2024-12-17T12:40:41,216 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/a161f194933a4f379e27491e553253e9 is 50, key is test_row_0/C:col10/1734439238958/Put/seqid=0 2024-12-17T12:40:41,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742403_1579 (size=12301) 2024-12-17T12:40:41,620 INFO 
[RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/a161f194933a4f379e27491e553253e9 2024-12-17T12:40:41,632 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/A/3befb08852334911addc41b4ee33c7fb as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/3befb08852334911addc41b4ee33c7fb 2024-12-17T12:40:41,637 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/3befb08852334911addc41b4ee33c7fb, entries=150, sequenceid=345, filesize=30.5 K 2024-12-17T12:40:41,638 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/B/3fcc125715764ac397bf0c5b68e63455 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/3fcc125715764ac397bf0c5b68e63455 2024-12-17T12:40:41,641 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/3fcc125715764ac397bf0c5b68e63455, entries=150, sequenceid=345, filesize=12.0 K 2024-12-17T12:40:41,642 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/.tmp/C/a161f194933a4f379e27491e553253e9 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/a161f194933a4f379e27491e553253e9 2024-12-17T12:40:41,645 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/a161f194933a4f379e27491e553253e9, entries=150, sequenceid=345, filesize=12.0 K 2024-12-17T12:40:41,646 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 05f19a912451e6e90726d599fdf98d6d in 1690ms, sequenceid=345, compaction requested=false 2024-12-17T12:40:41,646 DEBUG 
[StoreCloser-TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/091ed521f5a2453598fc0e14379d2476, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/95a3dd22d6bc433584815a43877984f7, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/5a72482db11e4b4c9343e6524606bb3b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/69b772d7434540d3b7f1259689e7afc8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/57bceb25fc014b5988c82128f9ab5b5c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/36be9b6774194945ab205d8051ddb78d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/84999441224f44868d22efbd74e7b96e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/1d7ac7c71d884b66a9f00846bf5c4841, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/3da0081aee2e4492beb3d1c2d162f762, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/715b2c17c24446eb9803820f8d79d242, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/9db7dbf316f34f49ad74f3bc9037b54f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/8c40f2a813844d56ab6ae8c94c095114, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/cb1c4a415d37477db45bd01a25037628, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c962bec30aab4ef3b3364c209b1df0a8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c61460c892c2461c81632c07c1bc3b8d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/2fa06d247c624c4ea8086a9bbe00c61d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e316fd97d279433c828037a6fcea1377, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/947c77633dd8485e81c3f5213ccbf057, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/70f4b71d220840059a76b2712cda91f0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/16304fedc25c4f67b8196f16b9e92da9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a0b212dd60b54b0687201f768ef9f3f8, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a2754d22c95e4f93b1b1f83bc8763103, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e42eaa73bcca4444b3c3c1632582b800] to archive 2024-12-17T12:40:41,647 DEBUG [StoreCloser-TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-17T12:40:41,650 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/57bceb25fc014b5988c82128f9ab5b5c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/57bceb25fc014b5988c82128f9ab5b5c 2024-12-17T12:40:41,650 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/95a3dd22d6bc433584815a43877984f7 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/95a3dd22d6bc433584815a43877984f7 2024-12-17T12:40:41,650 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/36be9b6774194945ab205d8051ddb78d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/36be9b6774194945ab205d8051ddb78d 2024-12-17T12:40:41,650 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/69b772d7434540d3b7f1259689e7afc8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/69b772d7434540d3b7f1259689e7afc8 2024-12-17T12:40:41,650 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/5a72482db11e4b4c9343e6524606bb3b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/5a72482db11e4b4c9343e6524606bb3b 
2024-12-17T12:40:41,651 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/1d7ac7c71d884b66a9f00846bf5c4841 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/1d7ac7c71d884b66a9f00846bf5c4841 2024-12-17T12:40:41,651 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/091ed521f5a2453598fc0e14379d2476 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/091ed521f5a2453598fc0e14379d2476 2024-12-17T12:40:41,651 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/84999441224f44868d22efbd74e7b96e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/84999441224f44868d22efbd74e7b96e 2024-12-17T12:40:41,652 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/3da0081aee2e4492beb3d1c2d162f762 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/3da0081aee2e4492beb3d1c2d162f762 2024-12-17T12:40:41,653 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/715b2c17c24446eb9803820f8d79d242 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/715b2c17c24446eb9803820f8d79d242 2024-12-17T12:40:41,653 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/8c40f2a813844d56ab6ae8c94c095114 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/8c40f2a813844d56ab6ae8c94c095114 2024-12-17T12:40:41,653 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/9db7dbf316f34f49ad74f3bc9037b54f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/9db7dbf316f34f49ad74f3bc9037b54f 2024-12-17T12:40:41,653 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/cb1c4a415d37477db45bd01a25037628 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/cb1c4a415d37477db45bd01a25037628 2024-12-17T12:40:41,653 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c962bec30aab4ef3b3364c209b1df0a8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c962bec30aab4ef3b3364c209b1df0a8 2024-12-17T12:40:41,654 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c61460c892c2461c81632c07c1bc3b8d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/c61460c892c2461c81632c07c1bc3b8d 2024-12-17T12:40:41,654 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/2fa06d247c624c4ea8086a9bbe00c61d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/2fa06d247c624c4ea8086a9bbe00c61d 2024-12-17T12:40:41,654 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e316fd97d279433c828037a6fcea1377 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e316fd97d279433c828037a6fcea1377 2024-12-17T12:40:41,655 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/70f4b71d220840059a76b2712cda91f0 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/70f4b71d220840059a76b2712cda91f0 2024-12-17T12:40:41,655 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/947c77633dd8485e81c3f5213ccbf057 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/947c77633dd8485e81c3f5213ccbf057 2024-12-17T12:40:41,655 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a2754d22c95e4f93b1b1f83bc8763103 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a2754d22c95e4f93b1b1f83bc8763103 2024-12-17T12:40:41,655 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/16304fedc25c4f67b8196f16b9e92da9 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/16304fedc25c4f67b8196f16b9e92da9 2024-12-17T12:40:41,655 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e42eaa73bcca4444b3c3c1632582b800 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/e42eaa73bcca4444b3c3c1632582b800 2024-12-17T12:40:41,655 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a0b212dd60b54b0687201f768ef9f3f8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/a0b212dd60b54b0687201f768ef9f3f8 2024-12-17T12:40:41,656 DEBUG [StoreCloser-TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/58e6e265e811460092ea980dd9a9c144, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/07e5b339b75d433dab44b65a9d330610, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/b7557f6ca4b944549f15bd7ef8762156, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/700188936dc740df94eabc6191460f8d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/b5a04b7195014db8a567f05981adec7f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/c48e789bd0ef4016871553a02f2b49f9, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/a0ed9c1d11844c9288159a1884b054c4, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/aec274a4aabb4ebfa77d0d79783fed81, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/592ff35fc3fc4a2fbbff77e38ffb7732, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/3fe0129e16ae467094af857240e86336, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/31eb1e202ac34306bf2db4991d42b89e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/2073930ddf7c4e4ba03ee402d081ad11, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/cf770b61021b41fa973b43b9928c1d3b, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/feba350231e747018ecaf68a046821b0, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/eecac2f5dee848ebbbf2ebe7808e6931, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d72a424e8a89451cb942f7f54f0efe78, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/05e92b2eda254d29b1b42bd12e48f9dc, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/52c1cae2dad24879970ffcfd38a54f94, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/7c268eb4a2e44ef2bb307cbc42e01b90, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/e1ab546a645d4c43be66476045b17d70, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/19ad7db4319849abb37f411d3c8b1dbd, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d7a5237bcf4a482da23488e1ddb77f4c, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/0643d9d3496a45f1b38022c64de73677] to archive 2024-12-17T12:40:41,657 DEBUG [StoreCloser-TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-17T12:40:41,658 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/58e6e265e811460092ea980dd9a9c144 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/58e6e265e811460092ea980dd9a9c144 2024-12-17T12:40:41,658 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/b5a04b7195014db8a567f05981adec7f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/b5a04b7195014db8a567f05981adec7f 2024-12-17T12:40:41,658 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/700188936dc740df94eabc6191460f8d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/700188936dc740df94eabc6191460f8d 2024-12-17T12:40:41,658 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/07e5b339b75d433dab44b65a9d330610 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/07e5b339b75d433dab44b65a9d330610 2024-12-17T12:40:41,658 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/c48e789bd0ef4016871553a02f2b49f9 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/c48e789bd0ef4016871553a02f2b49f9 2024-12-17T12:40:41,658 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/b7557f6ca4b944549f15bd7ef8762156 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/b7557f6ca4b944549f15bd7ef8762156 2024-12-17T12:40:41,658 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/a0ed9c1d11844c9288159a1884b054c4 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/a0ed9c1d11844c9288159a1884b054c4 2024-12-17T12:40:41,658 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/aec274a4aabb4ebfa77d0d79783fed81 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/aec274a4aabb4ebfa77d0d79783fed81 2024-12-17T12:40:41,659 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/2073930ddf7c4e4ba03ee402d081ad11 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/2073930ddf7c4e4ba03ee402d081ad11 2024-12-17T12:40:41,659 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/3fe0129e16ae467094af857240e86336 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/3fe0129e16ae467094af857240e86336 2024-12-17T12:40:41,659 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/592ff35fc3fc4a2fbbff77e38ffb7732 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/592ff35fc3fc4a2fbbff77e38ffb7732 2024-12-17T12:40:41,659 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/31eb1e202ac34306bf2db4991d42b89e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/31eb1e202ac34306bf2db4991d42b89e 2024-12-17T12:40:41,659 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/cf770b61021b41fa973b43b9928c1d3b to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/cf770b61021b41fa973b43b9928c1d3b 2024-12-17T12:40:41,660 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d72a424e8a89451cb942f7f54f0efe78 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d72a424e8a89451cb942f7f54f0efe78 2024-12-17T12:40:41,660 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/eecac2f5dee848ebbbf2ebe7808e6931 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/eecac2f5dee848ebbbf2ebe7808e6931 2024-12-17T12:40:41,660 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/feba350231e747018ecaf68a046821b0 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/feba350231e747018ecaf68a046821b0 2024-12-17T12:40:41,660 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/05e92b2eda254d29b1b42bd12e48f9dc to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/05e92b2eda254d29b1b42bd12e48f9dc 2024-12-17T12:40:41,661 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/7c268eb4a2e44ef2bb307cbc42e01b90 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/7c268eb4a2e44ef2bb307cbc42e01b90 2024-12-17T12:40:41,661 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/19ad7db4319849abb37f411d3c8b1dbd to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/19ad7db4319849abb37f411d3c8b1dbd 2024-12-17T12:40:41,661 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/52c1cae2dad24879970ffcfd38a54f94 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/52c1cae2dad24879970ffcfd38a54f94 2024-12-17T12:40:41,661 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/e1ab546a645d4c43be66476045b17d70 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/e1ab546a645d4c43be66476045b17d70 2024-12-17T12:40:41,661 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d7a5237bcf4a482da23488e1ddb77f4c to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/d7a5237bcf4a482da23488e1ddb77f4c 2024-12-17T12:40:41,661 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/0643d9d3496a45f1b38022c64de73677 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/0643d9d3496a45f1b38022c64de73677 2024-12-17T12:40:41,662 DEBUG [StoreCloser-TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/22c00e7b461c4305b5992dd44790f63d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/2ace280874e54a64adbbae312061d575, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/5678a241cc7f4041bf3a73deb84a63bf, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/f7928543c2de4a95a65c069251b1424d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/ad8f9329ee66438581582907271001aa, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c2cd6a2103e54ba49c4b8a2242ce4915, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/60ae712495274839a370ee9ce50415c1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c4b0281920fe4b4cba17c2f289a38c11, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/feff22f1ff6a4ba2b666b28f71546d73, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/61ffd7649f454989ab0df69fdabacf60, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/09a58652ceb3473b8c115bcd6a48dc16, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e38f0bc702041349d8053c53c3002be, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/d217ebc629ce4e8db8c37fa641829ee8, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/cc71621da8dd430895e7f72e8b06206d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/e2acedf7a49b44319a5b5005b1ed8101, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/55ecf9796c274f95a53c3498e9f95e4d, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/29724745054743edb1d67fe2e5fcc288, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/744cc97aee7845bc813ec59bad9c7f04, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7b14fc95c3e0481ea2020d6a9e1ea7e2, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/f32ce6d9f5fa480fbc1e9f13a2de514f, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/302884fc3fe54460ad3d456cbc226ae1, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/3bc9d3d558384e12a1dbfc2c5e62b91e, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e377d256a39493292a83a9e74f0dfb3] to archive 2024-12-17T12:40:41,662 DEBUG [StoreCloser-TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-17T12:40:41,663 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/2ace280874e54a64adbbae312061d575 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/2ace280874e54a64adbbae312061d575 2024-12-17T12:40:41,664 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/22c00e7b461c4305b5992dd44790f63d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/22c00e7b461c4305b5992dd44790f63d 2024-12-17T12:40:41,664 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/f7928543c2de4a95a65c069251b1424d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/f7928543c2de4a95a65c069251b1424d 2024-12-17T12:40:41,664 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c2cd6a2103e54ba49c4b8a2242ce4915 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c2cd6a2103e54ba49c4b8a2242ce4915 2024-12-17T12:40:41,664 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/5678a241cc7f4041bf3a73deb84a63bf to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/5678a241cc7f4041bf3a73deb84a63bf 2024-12-17T12:40:41,664 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/ad8f9329ee66438581582907271001aa to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/ad8f9329ee66438581582907271001aa 2024-12-17T12:40:41,664 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c4b0281920fe4b4cba17c2f289a38c11 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/c4b0281920fe4b4cba17c2f289a38c11 2024-12-17T12:40:41,664 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/60ae712495274839a370ee9ce50415c1 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/60ae712495274839a370ee9ce50415c1 2024-12-17T12:40:41,665 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/feff22f1ff6a4ba2b666b28f71546d73 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/feff22f1ff6a4ba2b666b28f71546d73 2024-12-17T12:40:41,665 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/61ffd7649f454989ab0df69fdabacf60 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/61ffd7649f454989ab0df69fdabacf60 2024-12-17T12:40:41,665 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/09a58652ceb3473b8c115bcd6a48dc16 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/09a58652ceb3473b8c115bcd6a48dc16 2024-12-17T12:40:41,665 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/cc71621da8dd430895e7f72e8b06206d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/cc71621da8dd430895e7f72e8b06206d 2024-12-17T12:40:41,665 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e38f0bc702041349d8053c53c3002be to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e38f0bc702041349d8053c53c3002be 2024-12-17T12:40:41,665 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/d217ebc629ce4e8db8c37fa641829ee8 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/d217ebc629ce4e8db8c37fa641829ee8 2024-12-17T12:40:41,665 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/e2acedf7a49b44319a5b5005b1ed8101 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/e2acedf7a49b44319a5b5005b1ed8101 2024-12-17T12:40:41,666 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/55ecf9796c274f95a53c3498e9f95e4d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/55ecf9796c274f95a53c3498e9f95e4d 2024-12-17T12:40:41,669 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/29724745054743edb1d67fe2e5fcc288 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/29724745054743edb1d67fe2e5fcc288 2024-12-17T12:40:41,669 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/f32ce6d9f5fa480fbc1e9f13a2de514f to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/f32ce6d9f5fa480fbc1e9f13a2de514f 2024-12-17T12:40:41,669 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/744cc97aee7845bc813ec59bad9c7f04 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/744cc97aee7845bc813ec59bad9c7f04 2024-12-17T12:40:41,669 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/3bc9d3d558384e12a1dbfc2c5e62b91e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/3bc9d3d558384e12a1dbfc2c5e62b91e 2024-12-17T12:40:41,671 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7b14fc95c3e0481ea2020d6a9e1ea7e2 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7b14fc95c3e0481ea2020d6a9e1ea7e2 2024-12-17T12:40:41,671 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/302884fc3fe54460ad3d456cbc226ae1 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/302884fc3fe54460ad3d456cbc226ae1 2024-12-17T12:40:41,671 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e377d256a39493292a83a9e74f0dfb3 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/7e377d256a39493292a83a9e74f0dfb3 2024-12-17T12:40:41,674 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/recovered.edits/348.seqid, newMaxSeqId=348, maxSeqId=4 2024-12-17T12:40:41,674 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d. 2024-12-17T12:40:41,674 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] regionserver.HRegion(1635): Region close journal for 05f19a912451e6e90726d599fdf98d6d: 2024-12-17T12:40:41,676 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION, pid=162}] handler.UnassignRegionHandler(170): Closed 05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,676 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=161 updating hbase:meta row=05f19a912451e6e90726d599fdf98d6d, regionState=CLOSED 2024-12-17T12:40:41,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-17T12:40:41,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; CloseRegionProcedure 05f19a912451e6e90726d599fdf98d6d, server=681c08bfdbdf,36491,1734439058372 in 1.8750 sec 2024-12-17T12:40:41,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-12-17T12:40:41,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=05f19a912451e6e90726d599fdf98d6d, UNASSIGN in 1.8800 sec 2024-12-17T12:40:41,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-17T12:40:41,680 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8830 sec 2024-12-17T12:40:41,680 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734439241680"}]},"ts":"1734439241680"} 2024-12-17T12:40:41,681 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-17T12:40:41,717 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set 
TestAcidGuarantees to state=DISABLED 2024-12-17T12:40:41,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9460 sec 2024-12-17T12:40:41,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-17T12:40:41,884 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-17T12:40:41,885 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-17T12:40:41,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:41,889 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=163, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:41,891 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=163, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:41,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-17T12:40:41,892 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,896 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C, FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/recovered.edits] 2024-12-17T12:40:41,901 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/8767e8af636b4055922f63d7b19f0f0e to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/8767e8af636b4055922f63d7b19f0f0e 2024-12-17T12:40:41,901 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/3befb08852334911addc41b4ee33c7fb to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/A/3befb08852334911addc41b4ee33c7fb 
2024-12-17T12:40:41,905 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/1e1f776ea960429fbe9c4b79e60921cf to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/1e1f776ea960429fbe9c4b79e60921cf 2024-12-17T12:40:41,905 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/3fcc125715764ac397bf0c5b68e63455 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/B/3fcc125715764ac397bf0c5b68e63455 2024-12-17T12:40:41,909 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/a161f194933a4f379e27491e553253e9 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/a161f194933a4f379e27491e553253e9 2024-12-17T12:40:41,909 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/50bee73dab3c4d0aa3c276a187c75040 to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/C/50bee73dab3c4d0aa3c276a187c75040 2024-12-17T12:40:41,912 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/recovered.edits/348.seqid to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d/recovered.edits/348.seqid 2024-12-17T12:40:41,912 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/default/TestAcidGuarantees/05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,912 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-17T12:40:41,913 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-17T12:40:41,913 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-17T12:40:41,919 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412170de20ed96097471f9dac55d17511ec92_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412170de20ed96097471f9dac55d17511ec92_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,919 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121707852c86756c49f7a8eefa3b59d968f7_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121707852c86756c49f7a8eefa3b59d968f7_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,919 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121714de5feb1f4d40f595a4ce76b99be506_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121714de5feb1f4d40f595a4ce76b99be506_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,919 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121713f919758c584934a55635b6f0a39ae7_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121713f919758c584934a55635b6f0a39ae7_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,919 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217155ce4d4418946b29238e912190ee5b6_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217155ce4d4418946b29238e912190ee5b6_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,920 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121737e9f1a713264c05a3f19c16089ecdb3_05f19a912451e6e90726d599fdf98d6d to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121737e9f1a713264c05a3f19c16089ecdb3_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,920 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412172892a7cb8c85451bad1e951a5dbeec74_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412172892a7cb8c85451bad1e951a5dbeec74_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,920 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121742725b33117c4dcfaa364362eb302e54_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121742725b33117c4dcfaa364362eb302e54_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,921 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412177f9921ca096f4cefb6a6b738138b4f26_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412177f9921ca096f4cefb6a6b738138b4f26_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,921 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412176ffdb4e105f8443197d05fa27b337112_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412176ffdb4e105f8443197d05fa27b337112_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,921 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217c67e451afdb1410eb4cbee3501ae1197_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217c67e451afdb1410eb4cbee3501ae1197_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,921 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e25e6e8fe82e4665ae9491972a9fb4c7_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e25e6e8fe82e4665ae9491972a9fb4c7_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,921 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217de33a658b3f844709c052f3811d088e3_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217de33a658b3f844709c052f3811d088e3_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,921 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e529123ceeda42759a110c7883bf0595_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e529123ceeda42759a110c7883bf0595_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,921 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e6dfcdd6150f4ae4be5c4c54bcf473b0_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217e6dfcdd6150f4ae4be5c4c54bcf473b0_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,921 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f01fd69f061641cea1df7062dd44e2c5_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f01fd69f061641cea1df7062dd44e2c5_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,922 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f68e0ec6cd3041caa8852c8e0c88b572_05f19a912451e6e90726d599fdf98d6d to 
hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f68e0ec6cd3041caa8852c8e0c88b572_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,922 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f71fbd78f955481f897328c794db82ec_05f19a912451e6e90726d599fdf98d6d to hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241217f71fbd78f955481f897328c794db82ec_05f19a912451e6e90726d599fdf98d6d 2024-12-17T12:40:41,922 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-17T12:40:41,924 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=163, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:41,927 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-17T12:40:41,928 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-17T12:40:41,929 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=163, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:41,929 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-17T12:40:41,929 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734439241929"}]},"ts":"9223372036854775807"} 2024-12-17T12:40:41,931 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-17T12:40:41,931 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 05f19a912451e6e90726d599fdf98d6d, NAME => 'TestAcidGuarantees,,1734439211885.05f19a912451e6e90726d599fdf98d6d.', STARTKEY => '', ENDKEY => ''}] 2024-12-17T12:40:41,931 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-17T12:40:41,931 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734439241931"}]},"ts":"9223372036854775807"} 2024-12-17T12:40:41,934 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-17T12:40:41,945 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=163, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-17T12:40:41,946 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 60 msec 2024-12-17T12:40:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38693 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-17T12:40:41,992 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-17T12:40:42,004 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=247 (was 244) - Thread LEAK? -, OpenFileDescriptor=459 (was 454) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=307 (was 329), ProcessCount=11 (was 11), AvailableMemoryMB=3659 (was 3707) 2024-12-17T12:40:42,004 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-17T12:40:42,004 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-17T12:40:42,004 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47f9cd1b to 127.0.0.1:59557 2024-12-17T12:40:42,005 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:42,005 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-17T12:40:42,005 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=702506913, stopped=false 2024-12-17T12:40:42,005 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=681c08bfdbdf,38693,1734439057670 2024-12-17T12:40:42,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-17T12:40:42,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-17T12:40:42,011 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-17T12:40:42,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:40:42,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-17T12:40:42,012 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): 
master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-17T12:40:42,012 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:42,012 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-17T12:40:42,012 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '681c08bfdbdf,36491,1734439058372' ***** 2024-12-17T12:40:42,012 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-17T12:40:42,012 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-17T12:40:42,012 INFO [RS:0;681c08bfdbdf:36491 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-17T12:40:42,012 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-17T12:40:42,012 INFO [RS:0;681c08bfdbdf:36491 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-17T12:40:42,013 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(3579): Received CLOSE for 0747a0e153fecce30e3abad582ed5b21 2024-12-17T12:40:42,013 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1224): stopping server 681c08bfdbdf,36491,1734439058372 2024-12-17T12:40:42,013 DEBUG [RS:0;681c08bfdbdf:36491 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-17T12:40:42,013 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-17T12:40:42,013 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-17T12:40:42,013 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-17T12:40:42,013 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-17T12:40:42,013 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-17T12:40:42,013 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 0747a0e153fecce30e3abad582ed5b21=hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21.} 2024-12-17T12:40:42,013 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 0747a0e153fecce30e3abad582ed5b21, disabling compactions & flushes 2024-12-17T12:40:42,013 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-17T12:40:42,013 INFO [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-17T12:40:42,013 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 
2024-12-17T12:40:42,013 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-17T12:40:42,013 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 2024-12-17T12:40:42,013 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-17T12:40:42,013 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. after waiting 0 ms 2024-12-17T12:40:42,013 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-17T12:40:42,013 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 2024-12-17T12:40:42,014 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 0747a0e153fecce30e3abad582ed5b21 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-17T12:40:42,014 INFO [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-17T12:40:42,016 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1629): Waiting on 0747a0e153fecce30e3abad582ed5b21, 1588230740 2024-12-17T12:40:42,027 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/namespace/0747a0e153fecce30e3abad582ed5b21/.tmp/info/54767564f1d64014b99fc9bf9e51947b is 45, key is default/info:d/1734439063792/Put/seqid=0 2024-12-17T12:40:42,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742404_1580 (size=5037) 2024-12-17T12:40:42,032 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/.tmp/info/d924bf15ea06462ba375c8b7f7acafa7 is 143, key is hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21./info:regioninfo/1734439063641/Put/seqid=0 2024-12-17T12:40:42,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742405_1581 (size=7725) 2024-12-17T12:40:42,086 INFO [regionserver/681c08bfdbdf:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-17T12:40:42,216 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1629): Waiting on 0747a0e153fecce30e3abad582ed5b21, 1588230740 2024-12-17T12:40:42,417 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1629): Waiting on 0747a0e153fecce30e3abad582ed5b21, 1588230740 2024-12-17T12:40:42,432 INFO 
[RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/namespace/0747a0e153fecce30e3abad582ed5b21/.tmp/info/54767564f1d64014b99fc9bf9e51947b 2024-12-17T12:40:42,437 INFO [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/.tmp/info/d924bf15ea06462ba375c8b7f7acafa7 2024-12-17T12:40:42,441 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/namespace/0747a0e153fecce30e3abad582ed5b21/.tmp/info/54767564f1d64014b99fc9bf9e51947b as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/namespace/0747a0e153fecce30e3abad582ed5b21/info/54767564f1d64014b99fc9bf9e51947b 2024-12-17T12:40:42,445 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/namespace/0747a0e153fecce30e3abad582ed5b21/info/54767564f1d64014b99fc9bf9e51947b, entries=2, sequenceid=6, filesize=4.9 K 2024-12-17T12:40:42,445 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 0747a0e153fecce30e3abad582ed5b21 in 432ms, sequenceid=6, compaction requested=false 2024-12-17T12:40:42,449 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/namespace/0747a0e153fecce30e3abad582ed5b21/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-17T12:40:42,450 INFO [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 2024-12-17T12:40:42,450 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 0747a0e153fecce30e3abad582ed5b21: 2024-12-17T12:40:42,450 DEBUG [RS_CLOSE_REGION-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734439062331.0747a0e153fecce30e3abad582ed5b21. 
2024-12-17T12:40:42,459 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/.tmp/rep_barrier/69efeee80d1240fe986f28175b851e34 is 102, key is TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7./rep_barrier:/1734439092897/DeleteFamily/seqid=0 2024-12-17T12:40:42,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742406_1582 (size=6025) 2024-12-17T12:40:42,617 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-17T12:40:42,818 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-17T12:40:42,863 INFO [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/.tmp/rep_barrier/69efeee80d1240fe986f28175b851e34 2024-12-17T12:40:42,886 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/.tmp/table/10136ae81dee43388db6e67e887bb705 is 96, key is TestAcidGuarantees,,1734439064005.502e77060db097ea5decbe44e66ef8e7./table:/1734439092897/DeleteFamily/seqid=0 2024-12-17T12:40:42,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742407_1583 (size=5942) 2024-12-17T12:40:42,891 INFO [regionserver/681c08bfdbdf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-17T12:40:42,891 INFO [regionserver/681c08bfdbdf:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-17T12:40:43,018 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-17T12:40:43,018 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-17T12:40:43,018 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-17T12:40:43,218 DEBUG [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-17T12:40:43,291 INFO [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/.tmp/table/10136ae81dee43388db6e67e887bb705 2024-12-17T12:40:43,302 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/.tmp/info/d924bf15ea06462ba375c8b7f7acafa7 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/info/d924bf15ea06462ba375c8b7f7acafa7 2024-12-17T12:40:43,309 INFO [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/info/d924bf15ea06462ba375c8b7f7acafa7, entries=22, sequenceid=93, filesize=7.5 K
2024-12-17T12:40:43,310 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/.tmp/rep_barrier/69efeee80d1240fe986f28175b851e34 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/rep_barrier/69efeee80d1240fe986f28175b851e34
2024-12-17T12:40:43,314 INFO [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/rep_barrier/69efeee80d1240fe986f28175b851e34, entries=6, sequenceid=93, filesize=5.9 K
2024-12-17T12:40:43,315 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/.tmp/table/10136ae81dee43388db6e67e887bb705 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/table/10136ae81dee43388db6e67e887bb705
2024-12-17T12:40:43,320 INFO [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/table/10136ae81dee43388db6e67e887bb705, entries=9, sequenceid=93, filesize=5.8 K
2024-12-17T12:40:43,321 INFO [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1308ms, sequenceid=93, compaction requested=false
2024-12-17T12:40:43,326 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1
2024-12-17T12:40:43,326 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-17T12:40:43,327 INFO [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-17T12:40:43,327 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-17T12:40:43,327 DEBUG [RS_CLOSE_META-regionserver/681c08bfdbdf:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-17T12:40:43,418 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1250): stopping server 681c08bfdbdf,36491,1734439058372; all regions closed.
2024-12-17T12:40:43,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741834_1010 (size=26050)
2024-12-17T12:40:43,427 DEBUG [RS:0;681c08bfdbdf:36491 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/oldWALs
2024-12-17T12:40:43,427 INFO [RS:0;681c08bfdbdf:36491 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 681c08bfdbdf%2C36491%2C1734439058372.meta:.meta(num 1734439062032)
2024-12-17T12:40:43,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741832_1008 (size=13043993)
2024-12-17T12:40:43,432 DEBUG [RS:0;681c08bfdbdf:36491 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/oldWALs
2024-12-17T12:40:43,432 INFO [RS:0;681c08bfdbdf:36491 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 681c08bfdbdf%2C36491%2C1734439058372:(num 1734439061052)
2024-12-17T12:40:43,432 DEBUG [RS:0;681c08bfdbdf:36491 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-17T12:40:43,432 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.LeaseManager(133): Closed leases
2024-12-17T12:40:43,433 INFO [RS:0;681c08bfdbdf:36491 {}] hbase.ChoreService(370): Chore service for: regionserver/681c08bfdbdf:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-17T12:40:43,433 INFO [regionserver/681c08bfdbdf:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-17T12:40:43,433 INFO [RS:0;681c08bfdbdf:36491 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36491
2024-12-17T12:40:43,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/681c08bfdbdf,36491,1734439058372
2024-12-17T12:40:43,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-17T12:40:43,476 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher.
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007fe6508f3790@18b1644f rejected from java.util.concurrent.ThreadPoolExecutor@5db3f93d[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 15]
	at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?]
	at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?]
	at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4]
	at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4]
2024-12-17T12:40:43,477 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [681c08bfdbdf,36491,1734439058372]
2024-12-17T12:40:43,477 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 681c08bfdbdf,36491,1734439058372; numProcessing=1
2024-12-17T12:40:43,495 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/681c08bfdbdf,36491,1734439058372 already deleted, retry=false
2024-12-17T12:40:43,495 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 681c08bfdbdf,36491,1734439058372 expired; onlineServers=0
2024-12-17T12:40:43,495 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '681c08bfdbdf,38693,1734439057670' *****
2024-12-17T12:40:43,495 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-17T12:40:43,496 DEBUG [M:0;681c08bfdbdf:38693 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73dd2084, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=681c08bfdbdf/172.17.0.2:0
2024-12-17T12:40:43,496 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.HRegionServer(1224): stopping server 681c08bfdbdf,38693,1734439057670
2024-12-17T12:40:43,496 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.HRegionServer(1250): stopping server 681c08bfdbdf,38693,1734439057670; all regions closed.
2024-12-17T12:40:43,496 DEBUG [M:0;681c08bfdbdf:38693 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-17T12:40:43,496 DEBUG [M:0;681c08bfdbdf:38693 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-17T12:40:43,497 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-17T12:40:43,497 DEBUG [M:0;681c08bfdbdf:38693 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-17T12:40:43,497 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster-HFileCleaner.large.0-1734439060726 {}] cleaner.HFileCleaner(306): Exit Thread[master/681c08bfdbdf:0:becomeActiveMaster-HFileCleaner.large.0-1734439060726,5,FailOnTimeoutGroup]
2024-12-17T12:40:43,497 DEBUG [master/681c08bfdbdf:0:becomeActiveMaster-HFileCleaner.small.0-1734439060727 {}] cleaner.HFileCleaner(306): Exit Thread[master/681c08bfdbdf:0:becomeActiveMaster-HFileCleaner.small.0-1734439060727,5,FailOnTimeoutGroup]
2024-12-17T12:40:43,497 INFO [M:0;681c08bfdbdf:38693 {}] hbase.ChoreService(370): Chore service for: master/681c08bfdbdf:0 had [] on shutdown
2024-12-17T12:40:43,498 DEBUG [M:0;681c08bfdbdf:38693 {}] master.HMaster(1733): Stopping service threads
2024-12-17T12:40:43,498 INFO [M:0;681c08bfdbdf:38693 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-17T12:40:43,499 INFO [M:0;681c08bfdbdf:38693 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-17T12:40:43,499 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-17T12:40:43,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-17T12:40:43,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-17T12:40:43,503 DEBUG [M:0;681c08bfdbdf:38693 {}] zookeeper.ZKUtil(347): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-17T12:40:43,503 WARN [M:0;681c08bfdbdf:38693 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-17T12:40:43,503 INFO [M:0;681c08bfdbdf:38693 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-17T12:40:43,504 INFO [M:0;681c08bfdbdf:38693 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-17T12:40:43,504 DEBUG [M:0;681c08bfdbdf:38693 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-17T12:40:43,504 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-17T12:40:43,504 DEBUG [M:0;681c08bfdbdf:38693 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-17T12:40:43,504 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-17T12:40:43,504 DEBUG [M:0;681c08bfdbdf:38693 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-17T12:40:43,504 DEBUG [M:0;681c08bfdbdf:38693 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-17T12:40:43,504 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=745.84 KB heapSize=915.92 KB
2024-12-17T12:40:43,523 DEBUG [M:0;681c08bfdbdf:38693 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4cfea435c5544f478005873a2b7a4db4 is 82, key is hbase:meta,,1/info:regioninfo/1734439062142/Put/seqid=0
2024-12-17T12:40:43,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742408_1584 (size=5672)
2024-12-17T12:40:43,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-17T12:40:43,587 INFO [RS:0;681c08bfdbdf:36491 {}] regionserver.HRegionServer(1307): Exiting; stopping=681c08bfdbdf,36491,1734439058372; zookeeper connection closed.
2024-12-17T12:40:43,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36491-0x10033fed2a90001, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-17T12:40:43,587 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3320df55 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3320df55
2024-12-17T12:40:43,588 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-17T12:40:43,927 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2100 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4cfea435c5544f478005873a2b7a4db4
2024-12-17T12:40:43,957 DEBUG [M:0;681c08bfdbdf:38693 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b8ad20a831445cda19c042b135eab0e is 2285, key is \x00\x00\x00\x00\x00\x00\x00\x8A/proc:d/1734439215009/Put/seqid=0
2024-12-17T12:40:43,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742409_1585 (size=42331)
2024-12-17T12:40:44,361 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=745.28 KB at sequenceid=2100 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b8ad20a831445cda19c042b135eab0e
2024-12-17T12:40:44,368 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2b8ad20a831445cda19c042b135eab0e
2024-12-17T12:40:44,383 DEBUG [M:0;681c08bfdbdf:38693 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ead86c9c9d914d6ea0454b2689df8d27 is 69, key is 681c08bfdbdf,36491,1734439058372/rs:state/1734439060818/Put/seqid=0
2024-12-17T12:40:44,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073742410_1586 (size=5156)
2024-12-17T12:40:44,788 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2100 (bloomFilter=true), to=hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ead86c9c9d914d6ea0454b2689df8d27
2024-12-17T12:40:44,798 DEBUG [M:0;681c08bfdbdf:38693 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4cfea435c5544f478005873a2b7a4db4 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4cfea435c5544f478005873a2b7a4db4
2024-12-17T12:40:44,801 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4cfea435c5544f478005873a2b7a4db4, entries=8, sequenceid=2100, filesize=5.5 K
2024-12-17T12:40:44,802 DEBUG [M:0;681c08bfdbdf:38693 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b8ad20a831445cda19c042b135eab0e as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2b8ad20a831445cda19c042b135eab0e
2024-12-17T12:40:44,805 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2b8ad20a831445cda19c042b135eab0e
2024-12-17T12:40:44,806 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2b8ad20a831445cda19c042b135eab0e, entries=163, sequenceid=2100, filesize=41.3 K
2024-12-17T12:40:44,806 DEBUG [M:0;681c08bfdbdf:38693 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ead86c9c9d914d6ea0454b2689df8d27 as hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ead86c9c9d914d6ea0454b2689df8d27
2024-12-17T12:40:44,810 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38223/user/jenkins/test-data/4ee8a3fe-f0de-dfd4-da60-8e9cd74d86c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ead86c9c9d914d6ea0454b2689df8d27, entries=1, sequenceid=2100, filesize=5.0 K
2024-12-17T12:40:44,810 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.HRegion(3040): Finished flush of dataSize ~745.84 KB/763739, heapSize ~915.63 KB/937600, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1306ms, sequenceid=2100, compaction requested=false
2024-12-17T12:40:44,812 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-17T12:40:44,812 DEBUG [M:0;681c08bfdbdf:38693 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-17T12:40:44,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35695 is added to blk_1073741830_1006 (size=901414)
2024-12-17T12:40:44,814 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-17T12:40:44,814 INFO [M:0;681c08bfdbdf:38693 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-17T12:40:44,814 INFO [M:0;681c08bfdbdf:38693 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38693
2024-12-17T12:40:44,825 DEBUG [M:0;681c08bfdbdf:38693 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/681c08bfdbdf,38693,1734439057670 already deleted, retry=false
2024-12-17T12:40:44,934 INFO [M:0;681c08bfdbdf:38693 {}] regionserver.HRegionServer(1307): Exiting; stopping=681c08bfdbdf,38693,1734439057670; zookeeper connection closed.
2024-12-17T12:40:44,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-17T12:40:44,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38693-0x10033fed2a90000, quorum=127.0.0.1:59557, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-17T12:40:44,943 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e967c25{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-17T12:40:44,947 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e95663c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-17T12:40:44,947 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-17T12:40:44,947 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@637efe93{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-17T12:40:44,947 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f3c9073{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/hadoop.log.dir/,STOPPED}
2024-12-17T12:40:44,951 WARN [BP-1712470660-172.17.0.2-1734439054599 heartbeating to localhost/127.0.0.1:38223 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-17T12:40:44,951 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-17T12:40:44,951 WARN [BP-1712470660-172.17.0.2-1734439054599 heartbeating to localhost/127.0.0.1:38223 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1712470660-172.17.0.2-1734439054599 (Datanode Uuid 8a8aa23a-bee5-4781-95e2-e35bdb010347) service to localhost/127.0.0.1:38223
2024-12-17T12:40:44,951 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-17T12:40:44,953 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/cluster_e84f10d9-83a3-7112-3c7b-cf7e72d3a51d/dfs/data/data1/current/BP-1712470660-172.17.0.2-1734439054599 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-17T12:40:44,953 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/cluster_e84f10d9-83a3-7112-3c7b-cf7e72d3a51d/dfs/data/data2/current/BP-1712470660-172.17.0.2-1734439054599 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-17T12:40:44,954 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-17T12:40:44,961 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52042c53{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-17T12:40:44,961 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45e9671d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-17T12:40:44,961 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-17T12:40:44,962 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-17T12:40:44,962 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@625ac51e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/63e851ed-2e94-2a83-69e0-0dcd439348a2/hadoop.log.dir/,STOPPED}
2024-12-17T12:40:44,974 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-17T12:40:45,081 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down